source
stringlengths
3
92
c
stringlengths
26
2.25M
clm.c
#include "clm.h" #include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "model.h" #include "type.h" #include "util.h" lbfgsfloatval_t evaluate_clm(void *instance, const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n, const lbfgsfloatval_t step) { model_t *m = (model_t *)instance; unsigned char *msa = m->msa; int nrow = m->nrow; int ncol = m->ncol; double *w = m->w; double neff = m->neff; int threads_num = m->threads_num; int nsingle = ncol * ALPHA; // initialize double *objective_all = malloc(threads_num * sizeof(double)); memset(objective_all, 0.0, sizeof(double) * threads_num); double **gradient_all = m->gradient_all; for (int t = 0; t < threads_num; ++t) { memset(gradient_all[t], 0.0, sizeof(double) * n); } // start openmp parallel int pair_num = ncol * (ncol - 1) / 2; int *left = (int *)malloc(sizeof(int) * pair_num); int *right = (int *)malloc(sizeof(int) * pair_num); int cnt = 0; for (int i = 0; i < ncol; i++) { for (int j = 0; j < ncol; j++) { left[cnt] = i; left[cnt] = j; cnt += 1; } } int per = pair_num / threads_num; omp_set_num_threads(threads_num); #pragma omp parallel for for (int t = 0; t < threads_num; t++) { int pos_begin = per * t; int pos_end = per * (t + 1); if (t == threads_num - 1) { pos_end = pair_num; } lbfgsfloatval_t *pre_prob = lbfgs_malloc(ALPHA2); lbfgsfloatval_t *prob = lbfgs_malloc(ALPHA2); lbfgsfloatval_t *prob1 = lbfgs_malloc(ALPHA); lbfgsfloatval_t *prob2 = lbfgs_malloc(ALPHA); for (int c = pos_begin; c < pos_end; c++) { int pos1 = left[c]; int pos2 = right[c]; // for each instance for (int r = 0; r < nrow; r++) { // observed amino acids unsigned char *seq = msa + r * ncol; char o_aa1 = seq[pos1]; char o_aa2 = seq[pos2]; // calcuate the conditional likelihood memset(pre_prob, 0, sizeof(lbfgsfloatval_t) * ALPHA2); for (int aa1 = 0; aa1 < ALPHA; aa1++) { for (int aa2 = 0; aa2 < ALPHA; aa2++) { pre_prob[aa1 * ALPHA + aa2] = x[INDEX1(pos1, aa1)] + x[INDEX1(pos2, aa2)] + x[INDEX2(pos1, 
pos2, aa1, aa2)]; } } memset(prob1, 0, sizeof(lbfgsfloatval_t) * ALPHA); memset(prob2, 0, sizeof(lbfgsfloatval_t) * ALPHA); for (int i = 0; i < ncol; i++) { if (i == pos1 || i == pos2) { continue; } for (int aa = 0; aa < ALPHA; aa++) { if (i < pos1) { prob1[aa] += x[INDEX2(i, pos1, seq[i], aa)]; } else if (i > pos1) { prob1[aa] += x[INDEX2(pos1, i, aa, seq[i])]; } if (i < pos2) { prob2[aa] += x[INDEX2(i, pos2, seq[i], aa)]; } else if (i > pos1) { prob2[aa] += x[INDEX2(pos2, i, aa, seq[i])]; } } } double sum = 0.0; for (int aa1 = 0; aa1 < ALPHA; aa1++) { for (int aa2 = 0; aa2 < ALPHA; aa2++) { pre_prob[aa1 * ALPHA + aa2] += prob1[aa1] + prob2[aa2]; sum += exp(pre_prob[aa1 * ALPHA + aa2]); } } double logz = log(sum); memset(prob1, 0, sizeof(lbfgsfloatval_t) * ALPHA); memset(prob2, 0, sizeof(lbfgsfloatval_t) * ALPHA); for (int aa1 = 0; aa1 < ALPHA; aa1++) { for (int aa2 = 0; aa2 < ALPHA; aa2++) { prob[aa1 * ALPHA + aa2] = exp(pre_prob[aa1 * ALPHA + aa2] - logz); prob1[aa1] += prob[aa1 * ALPHA + aa2]; prob2[aa2] += prob[aa1 * ALPHA + aa2]; } } // end: calculate the conditional probatility // objective objective_all[t] += logz - pre_prob[o_aa1 * ALPHA + o_aa2]; // debug // printf("%3d %3d %.4f %.4f %4.f %4.f %4.f\n", pos1, pos2, // pre_prob[o_aa1 * ALPHA + o_aa2], prob[o_aa1 * ALPHA + o_aa2], // prob1[o_aa1], prob2[o_aa2], prob1[o_aa1] * prob2[o_aa2]); // gradient for signle item gradient_all[t][INDEX1(pos1, o_aa1)] -= w[r]; gradient_all[t][INDEX1(pos2, o_aa2)] -= w[r]; // expection for (int aa = 0; aa < ALPHA; aa++) { gradient_all[t][INDEX1(pos1, aa)] += w[r] * prob1[aa]; gradient_all[t][INDEX1(pos2, aa)] += w[r] * prob2[aa]; } // gradient for pair couplings gradient_all[t][INDEX2(pos1, pos2, o_aa1, o_aa2)] -= w[r]; for (int i = 0; i < ncol; i++) { if (i == pos1 || i == pos2) { continue; } if (i < pos1) { gradient_all[t][INDEX2(i, pos1, seq[i], o_aa1)] -= w[r]; } else if (i > pos1) { gradient_all[t][INDEX2(pos1, i, o_aa1, seq[i])] -= w[r]; } if (i < pos2) { 
gradient_all[t][INDEX2(i, pos2, seq[i], o_aa2)] -= w[r]; } else if (i > pos2) { gradient_all[t][INDEX2(pos2, i, o_aa2, seq[i])] -= w[r]; } } // expectation for (int aa1 = 0; aa1 < ALPHA; aa1++) { for (int aa2 = 0; aa2 < ALPHA; aa2++) { gradient_all[t][INDEX2(pos1, pos2, aa1, aa2)] += w[r] * prob[aa1 * ALPHA + aa2]; } } for (int i = 0; i < ncol; i++) { if (i == pos1 || i == pos2) { continue; } for (int aa = 0; aa < ALPHA; aa++) { if (i < pos1) { gradient_all[t][INDEX2(i, pos1, seq[i], aa)] += w[r] * prob1[aa]; } else if (i > pos1) { gradient_all[t][INDEX2(pos1, i, aa, seq[i])] += w[r] * prob1[aa]; } if (i < pos2) { gradient_all[t][INDEX2(i, pos2, seq[i], aa)] += w[r] * prob2[aa]; } else if (i > pos2) { gradient_all[t][INDEX2(pos2, i, aa, seq[i])] += w[r] * prob2[aa]; } } } // pair gradients } // end row } free(prob); free(prob1); free(prob2); free(pre_prob); } free(left); free(right); // reduction model lbfgsfloatval_t fx = 0.0; memset(g, 0, sizeof(lbfgsfloatval_t) * n); for (int t = 0; t < threads_num; ++t) { // printf("objective[%d]=%f\n", r, objective_all[r]); fx += objective_all[t]; // printf("fx= %f\n", fx); for (int i = 0; i < n; ++i) { g[i] += gradient_all[t][i]; } } // free memory free(objective_all); double norm = pair_num * 2.0 / ncol; // printf("fx %f neff %f\n", fx, m->mNeff); fx /= norm; for (int i = 0; i < n; i++) { g[i] /= norm; } // add regularization lbfgsfloatval_t lambda_single = m->neff * m->lambda_single; lbfgsfloatval_t lambda_pair = m->neff * m->lambda_pair; for (int i = 0; i < nsingle; i++) { fx += lambda_single * x[i] * x[i]; g[i] += 2.0 * lambda_single * x[i]; } for (int i = nsingle; i < n; i++) { fx += lambda_pair * x[i] * x[i]; g[i] += 2.0 * lambda_pair * x[i]; } return fx; } int progress_clm(void *instance, const lbfgsfloatval_t *x, const lbfgsfloatval_t *g, const lbfgsfloatval_t fx, const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm, const lbfgsfloatval_t step, int n, int k, int ls) { model_t *model = (model_t *)instance; 
model->iter = k; fprintf(model->flog, "iter= %d fx= %f xnorm = %f gnorm = %f step= %f ", k, fx, xnorm, gnorm, step); evaluate_model(model); printf("iter= %d fx= %f, xnorm = %f, gnorm = %f, step= %f ", k, fx, xnorm, gnorm, step); printf("orig_acc "); for (int i = 0; i < 8; i++) { printf("%.4f ", model->mat_acc[i]); } printf("apc_acc "); for (int i = 0; i < 8; i++) { printf("%.4f ", model->apc_acc[i]); } printf("\n"); return 0; }
reduction_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Denis Demidov
//

#if !defined(KRATOS_REDUCTION_UTILITIES_H_INCLUDED )
#define  KRATOS_REDUCTION_UTILITIES_H_INCLUDED

// System includes
#include <iostream>
#include <array>
#include <vector>
#include <tuple>
#include <cmath>
#include <limits>
#include <future>
#include <thread>

// External includes
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif

// Project includes
#include "includes/define.h"
#include "includes/global_variables.h"

namespace Kratos
{
///@addtogroup KratosCore

/** @brief utility function to do a sum reduction
 *
 * Usage pattern: each thread calls LocalReduce on its private copy, then
 * the per-thread copies are merged once via ThreadSafeReduce.
 */
template<class TDataType>
class SumReduction
{
public:
    typedef TDataType value_type;
    TDataType mvalue = TDataType(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TDataType GetValue() const
    {
        return mvalue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mvalue += value;
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const SumReduction<TDataType>& rOther)
    {
        #pragma omp atomic
        mvalue += rOther.mvalue;
    }
};

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************

/** @brief utility to reduce by subtraction: the result is -(sum of values). */
template<class TDataType>
class SubReduction
{
public:
    typedef TDataType value_type;
    TDataType mvalue = TDataType(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TDataType GetValue() const
    {
        return mvalue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mvalue -= value;
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    /// NOTE: += is intentional here: each per-thread partial is already
    /// negated by LocalReduce, so merging partials is an addition.
    void ThreadSafeReduce(const SubReduction<TDataType>& rOther)
    {
        #pragma omp atomic
        mvalue += rOther.mvalue;
    }
};

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************

/** @brief utility to reduce by taking the maximum; identity is lowest(). */
template<class TDataType>
class MaxReduction
{
public:
    typedef TDataType value_type;
    TDataType mvalue = std::numeric_limits<TDataType>::lowest(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TDataType GetValue() const
    {
        return mvalue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mvalue = std::max(mvalue,value);
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    /// (max is not expressible as an omp atomic, hence the critical section)
    void ThreadSafeReduce(const MaxReduction<TDataType>& rOther)
    {
        #pragma omp critical
        mvalue = std::max(mvalue,rOther.mvalue);
    }
};

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************

/** @brief utility to reduce by taking the minimum; identity is max(). */
template<class TDataType>
class MinReduction
{
public:
    typedef TDataType value_type;
    TDataType mvalue = std::numeric_limits<TDataType>::max(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TDataType GetValue() const
    {
        return mvalue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mvalue = std::min(mvalue,value);
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const MinReduction<TDataType>& rOther)
    {
        #pragma omp critical
        mvalue = std::min(mvalue,rOther.mvalue);
    }
};

/** @brief combines several reducers so one parallel loop can produce a
 * tuple of reductions; the per-element recursion over the tuple is done
 * at compile time with enable_if-terminated static recursion.
 */
template <class... Reducer>
struct CombinedReduction {
    typedef std::tuple<typename Reducer::value_type...> value_type;

    std::tuple<Reducer...> mChild;

    CombinedReduction() {}

    /// access to reduced value
    value_type GetValue(){
        value_type return_value;
        fill_value<0>(return_value);
        return return_value;
    }

    template <int I, class T>
    typename std::enable_if<(I < sizeof...(Reducer)), void>::type
    fill_value(T& v) {
        std::get<I>(v) = std::get<I>(mChild).GetValue();
        fill_value<I+1>(v);
    };

    template <int I, class T>
    typename std::enable_if<(I == sizeof...(Reducer)), void>::type
    fill_value(T& v) {}

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    template <class... T>
    void LocalReduce(const std::tuple<T...> &&v) {
        // Static recursive loop over tuple elements
        reduce_local<0>(v);
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const CombinedReduction &other) {
        reduce_global<0>(other);
    }

    private:

    template <int I, class T>
    typename std::enable_if<(I < sizeof...(Reducer)), void>::type
    reduce_local(T &&v) {
        std::get<I>(mChild).LocalReduce(std::get<I>(v));
        reduce_local<I+1>(std::forward<T>(v));
    };

    template <int I, class T>
    typename std::enable_if<(I == sizeof...(Reducer)), void>::type
    reduce_local(T &&v) {
        // Exit static recursion
    }

    template <int I>
    typename std::enable_if<(I < sizeof...(Reducer)), void>::type
    reduce_global(const CombinedReduction &other) {
        std::get<I>(mChild).ThreadSafeReduce(std::get<I>(other.mChild));
        reduce_global<I+1>(other);
    }

    template <int I>
    typename std::enable_if<(I == sizeof...(Reducer)), void>::type
    reduce_global(const CombinedReduction &other) {
        // Exit static recursion
    }
};

}  // namespace Kratos.

#endif // KRATOS_REDUCTION_UTILITIES_H_INCLUDED  defined
nstream-usm-target.c
/// /// Copyright (c) 2019, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: nstream /// /// PURPOSE: To compute memory bandwidth when adding a vector of a given /// number of double precision values to the scalar multiple of /// another vector of the same length, and storing the result in /// a third vector. 
/// /// USAGE: The program takes as input the number /// of iterations to loop over the triad vectors and /// the length of the vectors. /// /// <progname> <# iterations> <vector length> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// NOTES: Bandwidth is determined as the number of words read, plus the /// number of words written, times the size of the words, divided /// by the execution time. For a vector length of N, the total /// number of words read and written is 4*N*sizeof(double). /// /// /// HISTORY: This code is loosely based on the Stream benchmark by John /// McCalpin, but does not follow all the Stream rules. Hence, /// reported results should not be associated with Stream in /// external publications /// /// Converted to C++11 by Jeff Hammond, November 2017. /// Converted to C11 by Jeff Hammond, February 2019. /// ////////////////////////////////////////////////////////////////////// #pragma omp requires unified_shared_memory #include "prk_util.h" #include "prk_openmp.h" int main(int argc, char * argv[]) { printf("Parallel Research Kernels version %d\n", PRKVERSION ); printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n"); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// if (argc < 3) { printf("Usage: <# iterations> <vector length>\n"); return 1; } int iterations = atoi(argv[1]); if (iterations < 1) { printf("ERROR: iterations must be >= 1\n"); return 1; } // length of a the vector size_t length = atol(argv[2]); if (length <= 0) { printf("ERROR: Vector length must be greater than 0\n"); return 1; } int device = (argc > 3) ? 
atol(argv[3]) : omp_get_initial_device(); if ( (device < 0 || omp_get_num_devices() <= device ) && (device != omp_get_initial_device()) ) { printf("ERROR: device number %d is not valid.\n", device); return 1; } printf("Number of iterations = %d\n", iterations); printf("Vector length = %zu\n", length); printf("OpenMP Device = %d\n", device); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// double nstream_time = 0.0; int host = omp_get_initial_device(); size_t bytes = length*sizeof(double); double * restrict A = omp_target_alloc(bytes, host); double * restrict B = omp_target_alloc(bytes, host); double * restrict C = omp_target_alloc(bytes, host); double scalar = 3.0; #pragma omp parallel for simd schedule(static) for (size_t i=0; i<length; i++) { A[i] = 0.0; B[i] = 2.0; C[i] = 2.0; } { for (int iter = 0; iter<=iterations; iter++) { if (iter==1) nstream_time = omp_get_wtime(); #pragma omp target teams distribute parallel for simd schedule(static) device(device) for (size_t i=0; i<length; i++) { A[i] += B[i] + scalar * C[i]; } } nstream_time = omp_get_wtime() - nstream_time; } omp_target_free(C, host); omp_target_free(B, host); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// double ar = 0.0; double br = 2.0; double cr = 2.0; for (int i=0; i<=iterations; i++) { ar += br + scalar * cr; } ar *= length; double asum = 0.0; #pragma omp parallel for reduction(+:asum) for (size_t i=0; i<length; i++) { asum += fabs(A[i]); } omp_target_free(A, host); double epsilon=1.e-8; if (fabs(ar-asum)/asum > epsilon) { printf("Failed Validation on output array\n" " Expected checksum: %lf\n" " Observed checksum: %lf\n" "ERROR: solution did not validate\n", ar, asum); return 1; } else { printf("Solution validates\n"); double avgtime 
= nstream_time/iterations; double nbytes = 4.0 * length * sizeof(double); printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime); } return 0; }
fill_r_3c.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include "config.h"
#include "cint.h"

int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env);

/*
 * No-symmetry (s1) fill of one (ish, jsh) shell-pair column of the
 * 3-center integral tensor, looping over all k shells.
 *
 * out[naoi,naoj,naok,comp] in F-order
 */
void GTOr3c_fill_s1(int (*intor)(), double complex *out, double complex *buf,
                    int comp, int ish, int jsh,
                    int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];

        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t nij = naoi * naoj;
        const int dims[] = {naoi, naoj, naok};

        // incoming ish/jsh are 0-based offsets within the slice
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += jp * naoi + ip;

        int ksh, k0;
        int shls[3];
        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                // intor writes directly into the output slab for this k block
                (*intor)(out+k0*nij, dims, shls, atm, natm, bas, nbas, env,
                         cintopt, buf);
        }
}

/* Scatter one integral block into the s2ij-packed (lower-triangular)
 * layout, for the strictly i > j case. `in` is F-ordered (di,dj,dk,comp). */
static void zcopy_s2_igtj(double complex *out, double complex *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double complex *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin  = in  + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                // rows of the packed triangle grow by one
                                // element per i
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in  += dij * dk;
        }
}

/* Same as zcopy_s2_igtj but for diagonal shell pairs (ish == jsh):
 * only the j <= i lower triangle of each block is stored. */
static void zcopy_s2_ieqj(double complex *out, double complex *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double complex *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin  = in  + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in  += dij * dk;
        }
}

/*
 * s2ij-symmetric fill (ij lower triangle packed).
 *
 * out[comp,naok,nij] in C-order
 * nij = i1*(i1+1)/2 - i0*(i0+1)/2
 * [ \           ]
 * [****         ]
 * [*****        ]
 * [*****.       ] <= . may not be filled, if jsh-upper-bound < ish-upper-bound
 * [            \]
 */
void GTOr3c_fill_s2ij(int (*intor)(), double complex *out, double complex *buf,
                      int comp, int ish, int jsh,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        if (ip < jp) {
                // upper-triangle pair: nothing to store in the packed layout
                return;
        }

        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int i0 = ao_loc[ish0];
        const int i1 = ao_loc[ish1];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off = i0 * (i0 + 1) / 2;
        const size_t nij = i1 * (i1 + 1) / 2 - off;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];

        out += ip * (ip + 1) / 2 - off + jp;

        int ksh, dk, k0;
        int shls[3];
        // use the maximal k-shell dimension to size the integral buffer;
        // the intor scratch cache lives right behind it.
        // NOTE(review): this assumes GTOmax_cache_size is counted in
        // doubles while buf is double complex — confirm against callers.
        dk = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        double *cache = (double *)(buf + di * dj * dk * comp);
        shls[0] = ish;
        shls[1] = jsh;

        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                         cintopt, cache);
                if (ip != jp) {
                        zcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
                } else {
                        zcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
                }
        }
}

/* Placeholder: jk-symmetric fill is not implemented; aborts if selected. */
void GTOr3c_fill_s2jk(int (*intor)(), double complex *out, double complex *buf,
                      int comp, int ish, int jsh,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        fprintf(stderr, "GTOr3c_fill_s2jk not implemented\n");
        exit(1);
}

/*
 * Driver: iterates all (ish, jsh) shell pairs in an OpenMP parallel region
 * and dispatches to the selected fill routine; each thread owns a private
 * integral buffer plus intor cache.
 */
void GTOr3c_drv(int (*intor)(), void (*fill)(), double complex *eri,
                int comp, int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                                 atm, natm, bas, nbas, env);

#pragma omp parallel
{
        int ish, jsh, ij;
        // di*di*di*comp complex for the largest block; cache_size/2
        // complex ( == cache_size doubles) for the intor cache.
        // NOTE(review): if cache_size is odd the integer division drops
        // one double — presumably cache_size is even; verify upstream.
        double complex *buf = malloc(sizeof(double complex)
                                     * (di*di*di*comp + cache_size/2));
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, buf, comp, ish, jsh,
                        shls_slice, ao_loc, cintopt,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
GB_binop__second_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): all function bodies below are instantiated from shared
// template files via #include; only the macros specialize them for the
// SECOND operator on int32.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__second_int32
// A.*B function (eWiseMult):       GB_AemultB__second_int32
// A*D function (colscale):         GB_AxD__second_int32
// D*A function (rowscale):         GB_DxB__second_int32
// C+=B function (dense accum):     GB_Cdense_accumB__second_int32
// C+=b function (dense accum):     GB_Cdense_accumb__second_int32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__second_int32
// C=scalar+B                       (none)
// C=scalar+B'                      (none)
// C=A+scalar                       GB_bind2nd__second_int32
// C=A'+scalar                      GB_bind2nd_tran__second_int32

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = bij

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty: the SECOND operator never reads A's values)
#define GB_GETA(aij,Ax,pA) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = y ;

// op is second
#define GB_OP_IS_SECOND \
    1

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SECOND || GxB_NO_INT32 || GxB_NO_SECOND_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (SECOND is not in that list, so this variant is compiled out.)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__second_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__second_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__second_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached; kept by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__second_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__second_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__second_int32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__second_int32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0
// bind1st is meaningless for SECOND (result ignores x), so compiled out.

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = Bx [p] ;
        Cx [p] = bij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__second_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        // SECOND(aij, y) ignores aij entirely (empty GETA/BINOP slots):
        ;
        ;
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0
// bind1st-transpose is likewise compiled out for SECOND.

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int32_t aij = Ax [pA] ;         \
    Cx [pC] = aij ;                 \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ; ;                             \
    Cx [pC] = y ;                   \
}

GrB_Info GB_bind2nd_tran__second_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
empty.c
/* { dg-do compile } */ /* { dg-options "-O -fopenmp -fdump-tree-ompexp" } */ int main() { #pragma omp parallel {;} } /* There should not be a GOMP_parallel_start call. */ /* { dg-final { scan-tree-dump-times "GOMP_parallel_start" 0 "ompexp"} } */
make_graph.c
/* Copyright (C) 2009-2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #include <stdlib.h> #include <stdint.h> #include <stdio.h> #include <string.h> #include <limits.h> #include <assert.h> #include <math.h> #ifdef __MTA__ #include <sys/mta_task.h> #endif #ifdef GRAPH_GENERATOR_MPI #include <mpi.h> #endif #ifdef GRAPH_GENERATOR_OMP #include <omp.h> #endif /* Simplified interface to build graphs with scrambled vertices. */ #include "graph_generator.h" #include "permutation_gen.h" #include "apply_permutation_mpi.h" #include "scramble_edges.h" #include "utils.h" #ifdef GRAPH_GENERATOR_SEQ void make_graph(int log_numverts, int64_t desired_nedges, uint64_t userseed1, uint64_t userseed2, const double initiator[4], int64_t* nedges_ptr, int64_t** result_ptr) { int64_t N, M; /* Spread the two 64-bit numbers into five nonzero values in the correct * range. */ uint_fast32_t seed[5]; #ifdef GRAPHGEN_KEEP_MULTIPLICITIES generated_edge* edges; #else int64_t* edges; #endif int64_t nedges; int64_t* vertex_perm; int64_t* result; int64_t i; mrg_state state; int64_t v1; int64_t v2; N = (int64_t)pow(GRAPHGEN_INITIATOR_SIZE, log_numverts); M = desired_nedges; make_mrg_seed(userseed1, userseed2, seed); nedges = compute_edge_array_size(0, 1, M); *nedges_ptr = nedges; #ifdef GRAPHGEN_KEEP_MULTIPLICITIES edges = (generated_edge*)xcalloc(nedges, sizeof(generated_edge)); /* multiplicity set to 0 for unused edges */ #else edges = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t)); #endif generate_kronecker(0, 1, seed, log_numverts, M, initiator, edges); vertex_perm = (int64_t*)xmalloc(N * sizeof(int64_t)); /* result; AL: this is a needless warning about unused code. 
*/ #ifdef GRAPHGEN_KEEP_MULTIPLICITIES result = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t)); #else result = edges; #endif *result_ptr = result; mrg_seed(&state, seed); rand_sort_shared(&state, N, vertex_perm); /* Apply vertex permutation to graph, optionally copying into user's result * array. */ #ifdef GRAPHGEN_KEEP_MULTIPLICITIES for (i = 0; i < nedges; ++i) { if (edges[i].multiplicity != 0) { v1 = vertex_perm[edges[i].src]; v2 = vertex_perm[edges[i].tgt]; /* Sort these since otherwise the directions of the permuted edges would * give away the unscrambled vertex order. */ result[i * 2] = (v1 < v2) ? v1 : v2; result[i * 2 + 1] = (v1 < v2) ? v2 : v1; } else { result[i * 2] = result[i * 2 + 1] = (int64_t)(-1); } } free(edges); #else for (i = 0; i < 2 * nedges; i += 2) { if (edges[i] != (int64_t)(-1)) { v1 = vertex_perm[edges[i]]; v2 = vertex_perm[edges[i + 1]]; /* Sort these since otherwise the directions of the permuted edges would * give away the unscrambled vertex order. */ edges[i] = (v1 < v2) ? v1 : v2; edges[i + 1] = (v1 < v2) ? v2 : v1; } } #endif free(vertex_perm); /* Randomly mix up the order of the edges. */ scramble_edges_shared(userseed1, userseed2, nedges, edges); } #endif /* GRAPH_GENERATOR_SEQ */ #ifdef __MTA__ void make_graph(int log_numverts, int64_t desired_nedges, uint64_t userseed1, uint64_t userseed2, const double initiator[4], int64_t* nedges_ptr, int64_t** result_ptr) { int64_t N, M; N = (int64_t)pow(GRAPHGEN_INITIATOR_SIZE, log_numverts); M = (int64_t)desired_nedges; /* Spread the two 64-bit numbers into five nonzero values in the correct * range. 
*/ uint_fast32_t seed[5]; make_mrg_seed(userseed1, userseed2, seed); int64_t nedges = compute_edge_array_size(0, 1, M); *nedges_ptr = nedges; #ifdef GRAPHGEN_KEEP_MULTIPLICITIES generated_edge* edges = (generated_edge*)xcalloc(nedges, sizeof(generated_edge)); /* multiplicity set to 0 for unused edges */ #else int64_t* edges = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t)); #endif int rank, size; /* The "for all streams" here is in compiler versions >= 6.4 */ #pragma mta use 100 streams #pragma mta for all streams rank of size { double my_initiator[GRAPHGEN_INITIATOR_SIZE * GRAPHGEN_INITIATOR_SIZE]; /* Local copy */ int i; for (i = 0; i < GRAPHGEN_INITIATOR_SIZE * GRAPHGEN_INITIATOR_SIZE; ++i) { my_initiator[i] = initiator[i]; } generate_kronecker(rank, size, seed, log_numverts, M, my_initiator, edges); } int64_t* vertex_perm = (int64_t*)xmalloc(N * sizeof(int64_t)); int64_t* result; #ifdef GRAPHGEN_KEEP_MULTIPLICITIES result = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t)); #else result = edges; #endif *result_ptr = result; mrg_state state; mrg_seed(&state, seed); rand_sort_shared(&state, N, vertex_perm); int64_t i; /* Apply vertex permutation to graph, optionally copying into user's result * array. */ #ifdef GRAPHGEN_KEEP_MULTIPLICITIES #pragma mta assert parallel #pragma mta block schedule for (i = 0; i < nedges; ++i) { if (edges[i].multiplicity != 0) { int64_t v1 = vertex_perm[edges[i].src]; int64_t v2 = vertex_perm[edges[i].tgt]; /* Sort these since otherwise the directions of the permuted edges would * give away the unscrambled vertex order. 
*/ result[i * 2] = MTA_INT_MIN(v1, v2); result[i * 2 + 1] = MTA_INT_MAX(v1, v2); } else { result[i * 2] = result[i * 2 + 1] = (int64_t)(-1); } } free(edges); #else #pragma mta assert parallel #pragma mta block schedule for (i = 0; i < 2 * nedges; i += 2) { if (edges[i] != (int64_t)(-1)) { int64_t v1 = vertex_perm[edges[i]]; int64_t v2 = vertex_perm[edges[i + 1]]; /* Sort these since otherwise the directions of the permuted edges would * give away the unscrambled vertex order. */ edges[i] = MTA_INT_MIN(v1, v2); edges[i + 1] = MTA_INT_MAX(v1, v2); } } #endif free(vertex_perm); /* Randomly mix up the order of the edges. */ scramble_edges_shared(userseed1, userseed2, nedges, edges); } #endif /* __MTA__ */ #ifdef GRAPH_GENERATOR_OMP void make_graph(int log_numverts, int64_t desired_nedges, uint64_t userseed1, uint64_t userseed2, const double initiator[4], int64_t* nedges_ptr, int64_t** result_ptr) { int64_t N, M; N = (int64_t)pow(GRAPHGEN_INITIATOR_SIZE, log_numverts); M = desired_nedges; /* Spread the two 64-bit numbers into five nonzero values in the correct * range. 
*/ uint_fast32_t seed[5]; make_mrg_seed(userseed1, userseed2, seed); int64_t nedges = compute_edge_array_size(0, 1, M); *nedges_ptr = nedges; #ifdef GRAPHGEN_KEEP_MULTIPLICITIES generated_edge* edges = (generated_edge*)xcalloc(nedges, sizeof(generated_edge)); /* multiplicity set to 0 for unused edges */ #else int64_t* edges = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t)); #endif #pragma omp parallel { int rank = omp_get_thread_num(), size = omp_get_num_threads(); generate_kronecker(rank, size, seed, log_numverts, M, initiator, edges); } int64_t* vertex_perm = (int64_t*)xmalloc(N * sizeof(int64_t)); int64_t* result; #ifdef GRAPHGEN_KEEP_MULTIPLICITIES result = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t)); #else result = edges; #endif *result_ptr = result; mrg_state state; mrg_seed(&state, seed); rand_sort_shared(&state, N, vertex_perm); int64_t i; /* Apply vertex permutation to graph, optionally copying into user's result * array. */ #ifdef GRAPHGEN_KEEP_MULTIPLICITIES #pragma omp parallel for for (i = 0; i < nedges; ++i) { if (edges[i].multiplicity != 0) { int64_t v1 = vertex_perm[edges[i].src]; int64_t v2 = vertex_perm[edges[i].tgt]; /* Sort these since otherwise the directions of the permuted edges would * give away the unscrambled vertex order. */ result[i * 2] = (v1 < v2) ? v1 : v2; result[i * 2 + 1] = (v1 < v2) ? v2 : v1; } else { result[i * 2] = result[i * 2 + 1] = (int64_t)(-1); } } free(edges); #else #pragma omp parallel for for (i = 0; i < 2 * nedges; i += 2) { if (edges[i] != (int64_t)(-1)) { int64_t v1 = vertex_perm[edges[i]]; int64_t v2 = vertex_perm[edges[i + 1]]; /* Sort these since otherwise the directions of the permuted edges would * give away the unscrambled vertex order. */ edges[i] = (v1 < v2) ? v1 : v2; edges[i + 1] = (v1 < v2) ? v2 : v1; } } #endif free(vertex_perm); /* Randomly mix up the order of the edges. 
*/ scramble_edges_shared(userseed1, userseed2, nedges, edges); } #endif /* GRAPH_GENERATOR_OMP */ #ifdef GRAPH_GENERATOR_MPI void make_graph(int log_numverts, int64_t desired_nedges, uint64_t userseed1, uint64_t userseed2, const double initiator[4], int64_t* nedges_ptr, int64_t** result_ptr) { int64_t N, M; int rank, size; N = (int64_t)pow(GRAPHGEN_INITIATOR_SIZE, log_numverts); M = desired_nedges; /* Spread the two 64-bit numbers into five nonzero values in the correct * range. */ uint_fast32_t seed[5]; make_mrg_seed(userseed1, userseed2, seed); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); int64_t nedges = compute_edge_array_size(rank, size, M); #ifdef GRAPHGEN_KEEP_MULTIPLICITIES generated_edge* local_edges = (generated_edge*)xmalloc(nedges * sizeof(generated_edge)); #else int64_t* local_edges = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t)); #endif double start = MPI_Wtime(); generate_kronecker(rank, size, seed, log_numverts, M, initiator, local_edges); double gen_time = MPI_Wtime() - start; int64_t* local_vertex_perm = NULL; mrg_state state; mrg_seed(&state, seed); start = MPI_Wtime(); int64_t perm_local_size; rand_sort_mpi(MPI_COMM_WORLD, &state, N, &perm_local_size, &local_vertex_perm); double perm_gen_time = MPI_Wtime() - start; /* Copy the edge endpoints into the result array if necessary. */ int64_t* result; #ifdef GRAPHGEN_KEEP_MULTIPLICITIES result = (int64_t*)xmalloc(2 * nedges * sizeof(int64_t)); for (i = 0; i < nedges; ++i) { if (local_edges[i].multiplicity != 0) { result[i * 2] = local_edges[i].src; result[i * 2 + 1] = local_edges[i].tgt; } else { result[i * 2] = result[i * 2 + 1] = (int64_t)(-1); } } free(local_edges); local_edges = NULL; #else result = local_edges; *result_ptr = result; local_edges = NULL; /* Freed by caller */ #endif /* Apply vertex permutation to graph. 
*/ start = MPI_Wtime(); apply_permutation_mpi(MPI_COMM_WORLD, perm_local_size, local_vertex_perm, N, nedges, result); double perm_apply_time = MPI_Wtime() - start; free(local_vertex_perm); local_vertex_perm = NULL; /* Randomly mix up the order of the edges. */ start = MPI_Wtime(); int64_t* new_result; int64_t nedges_out; scramble_edges_mpi(MPI_COMM_WORLD, userseed1, userseed2, nedges, result, &nedges_out, &new_result); double edge_scramble_time = MPI_Wtime() - start; free(result); result = NULL; *result_ptr = new_result; *nedges_ptr = nedges_out; if (rank == 0) { fprintf(stdout, "unpermuted_graph_generation: %f s\n", gen_time); fprintf(stdout, "vertex_permutation_generation: %f s\n", perm_gen_time); fprintf(stdout, "vertex_permutation_application: %f s\n", perm_apply_time); fprintf(stdout, "edge_scrambling: %f s\n", edge_scramble_time); } } #endif /* PRNG interface for implementations; takes seed in same format as given by * users, and creates a vector of doubles in a reproducible (and * random-access) way. */ void make_random_numbers( /* in */ int64_t nvalues /* Number of values to generate */, /* in */ uint64_t userseed1 /* Arbitrary 64-bit seed value */, /* in */ uint64_t userseed2 /* Arbitrary 64-bit seed value */, /* in */ int64_t position /* Start index in random number stream */, /* out */ double* result /* Returned array of values */ ) { int64_t i; uint_fast32_t seed[5]; mrg_state st; make_mrg_seed(userseed1, userseed2, seed); mrg_seed(&st, seed); mrg_skip(&st, 2, 0, 2 * position); /* Each double takes two PRNG outputs */ for (i = 0; i < nvalues; ++i) { result[i] = mrg_get_double_orig(&st); } }
omp_for_firstprivate.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" int sum1; #pragma omp threadprivate(sum1) int test_omp_for_firstprivate() { int sum; int sum0; int known_sum; int threadsnum; sum = 0; sum0 = 12345; sum1 = 0; #pragma omp parallel { #pragma omp single { threadsnum=omp_get_num_threads(); } /* sum0 = 0; */ int i; #pragma omp for firstprivate(sum0) for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum0 + i; sum1 = sum0; } /* end of for */ #pragma omp critical { sum = sum + sum1; } /* end of critical */ } /* end of parallel */ known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return (known_sum == sum); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_firstprivate()) { num_failed++; } } return num_failed; }
convolution_sgemm.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __AVX__ static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size) { const float* kernel = _kernel; // kernel memory packed 8 x 8 kernel_tm.create(8 * kernel_size, inch, outch / 8 + (outch % 8) / 4 + outch % 4); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const float* k0 = kernel + (p + 0) * inch * kernel_size; const float* k1 = kernel + (p + 1) * inch * kernel_size; const float* k2 = kernel + (p + 2) * inch * kernel_size; const float* k3 = kernel + (p + 3) * inch * kernel_size; const float* k4 = kernel + (p + 4) * inch * kernel_size; const float* k5 = kernel + (p + 5) * inch * kernel_size; const float* k6 = kernel + (p + 6) * inch * kernel_size; const float* k7 = kernel + (p + 7) * inch * kernel_size; float* ktmp = kernel_tm.channel(p / 8); for (int q = 0; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = 
remain_outch_start + pp * 4; const float* k0 = kernel + (p + 0) * inch * kernel_size; const float* k1 = kernel + (p + 1) * inch * kernel_size; const float* k2 = kernel + (p + 2) * inch * kernel_size; const float* k3 = kernel + (p + 3) * inch * kernel_size; float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4); for (int q = 0; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { const float* k0 = kernel + (p + 0) * inch * kernel_size; float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); for (int q = 0; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } static void conv_im2col_sgemm_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // im2col Mat bottom_im2col(outw * outh, kernel_h * kernel_w * inch, elemsize, opt.workspace_allocator); { const int stride = kernel_h * kernel_w * outw * outh; float* ret = (float*)bottom_im2col; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const float* input = bottom_blob.channel(p); int retID = stride * p; for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 8 x 8 Mat bottom_tm(8 * kernel_size, inch, out_size / 8 + out_size % 8, elemsize, opt.workspace_allocator); { 
int nn_size = out_size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i / 8); for (int q = 0; q < inch * kernel_size; q++) { #if __AVX__ _mm256_storeu_ps(tmpptr, _mm256_loadu_ps(img0)); #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; #endif // __SSE__ tmpptr += 8; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i / 8 + i % 8); for (int q = 0; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i + 1); float* output2 = top_blob.channel(i + 2); float* output3 = top_blob.channel(i + 3); float* output4 = top_blob.channel(i + 4); float* output5 = top_blob.channel(i + 5); float* output6 = top_blob.channel(i + 6); float* output7 = top_blob.channel(i + 7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j = 0; for (; j + 7 < N; j = j + 8) { const float* vb = bottom_tm.channel(j / 8); const float* va = kernel_tm.channel(i / 8); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(biasptr); __m256 _sum1 = _mm256_broadcast_ss(biasptr + 1); __m256 _sum2 = _mm256_broadcast_ss(biasptr + 2); __m256 _sum3 = _mm256_broadcast_ss(biasptr + 3); __m256 _sum4 = _mm256_broadcast_ss(biasptr + 4); __m256 _sum5 = _mm256_broadcast_ss(biasptr + 5); __m256 _sum6 = _mm256_broadcast_ss(biasptr + 6); __m256 _sum7 = _mm256_broadcast_ss(biasptr + 7); int k = 0; for (; k + 3 < L; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70 va += 8; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = 
_mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71 va += 8; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62 _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72 va += 8; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += 
(a30-a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53 _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73 va += 8; vb += 32; } for (; k < L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _va4 = _mm256_broadcast_ss(va + 4); __m256 _va5 = _mm256_broadcast_ss(va + 5); __m256 _va6 = _mm256_broadcast_ss(va + 6); __m256 _va7 = _mm256_broadcast_ss(va + 7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); _mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; int k = 0; for (; k + 7 < L; k = k + 8) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; va += 8; sum0[n] += va[0] * vb[n + 8]; 
sum1[n] += va[1] * vb[n + 8]; sum2[n] += va[2] * vb[n + 8]; sum3[n] += va[3] * vb[n + 8]; sum4[n] += va[4] * vb[n + 8]; sum5[n] += va[5] * vb[n + 8]; sum6[n] += va[6] * vb[n + 8]; sum7[n] += va[7] * vb[n + 8]; va += 8; sum0[n] += va[0] * vb[n + 16]; sum1[n] += va[1] * vb[n + 16]; sum2[n] += va[2] * vb[n + 16]; sum3[n] += va[3] * vb[n + 16]; sum4[n] += va[4] * vb[n + 16]; sum5[n] += va[5] * vb[n + 16]; sum6[n] += va[6] * vb[n + 16]; sum7[n] += va[7] * vb[n + 16]; va += 8; sum0[n] += va[0] * vb[n + 24]; sum1[n] += va[1] * vb[n + 24]; sum2[n] += va[2] * vb[n + 24]; sum3[n] += va[3] * vb[n + 24]; sum4[n] += va[4] * vb[n + 24]; sum5[n] += va[5] * vb[n + 24]; sum6[n] += va[6] * vb[n + 24]; sum7[n] += va[7] * vb[n + 24]; va += 8; sum0[n] += va[0] * vb[n + 32]; sum1[n] += va[1] * vb[n + 32]; sum2[n] += va[2] * vb[n + 32]; sum3[n] += va[3] * vb[n + 32]; sum4[n] += va[4] * vb[n + 32]; sum5[n] += va[5] * vb[n + 32]; sum6[n] += va[6] * vb[n + 32]; sum7[n] += va[7] * vb[n + 32]; va += 8; sum0[n] += va[0] * vb[n + 40]; sum1[n] += va[1] * vb[n + 40]; sum2[n] += va[2] * vb[n + 40]; sum3[n] += va[3] * vb[n + 40]; sum4[n] += va[4] * vb[n + 40]; sum5[n] += va[5] * vb[n + 40]; sum6[n] += va[6] * vb[n + 40]; sum7[n] += va[7] * vb[n + 40]; va += 8; sum0[n] += va[0] * vb[n + 48]; sum1[n] += va[1] * vb[n + 48]; sum2[n] += va[2] * vb[n + 48]; sum3[n] += va[3] * vb[n + 48]; sum4[n] += va[4] * vb[n + 48]; sum5[n] += va[5] * vb[n + 48]; sum6[n] += va[6] * vb[n + 48]; sum7[n] += va[7] * vb[n + 48]; va += 8; sum0[n] += va[0] * vb[n + 56]; sum1[n] += va[1] * vb[n + 56]; sum2[n] += va[2] * vb[n + 56]; sum3[n] += va[3] * vb[n + 56]; sum4[n] += va[4] * vb[n + 56]; sum5[n] += va[5] * vb[n + 56]; sum6[n] += va[6] * vb[n + 56]; sum7[n] += va[7] * vb[n + 56]; va -= 56; } va += 64; vb += 64; } for (; k < L; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; 
sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; output4[n] = sum4[n] + biasptr[4]; output5[n] = sum5[n] + biasptr[5]; output6[n] = sum6[n] + biasptr[6]; output7[n] = sum7[n] + biasptr[7]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { const float* vb = bottom_tm.channel(j / 8 + j % 8); const float* va = kernel_tm.channel(i / 8); #if __AVX__ __m256 _sum0_7 = _mm256_loadu_ps(biasptr); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < L; k = k + 4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb + 1); __m256 _vb2 = _mm256_broadcast_ss(vb + 2); __m256 _vb3 = _mm256_broadcast_ss(vb + 3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va + 8); __m256 _va2 = _mm256_loadu_ps(va + 16); __m256 _va3 = _mm256_loadu_ps(va + 24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30 va += 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k < L; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0.f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = 
output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; float sum4 = biasptr[4]; float sum5 = biasptr[5]; float sum6 = biasptr[6]; float sum7 = biasptr[7]; for (int k = 0; k < L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __AVX__ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i + 1); float* output2 = top_blob.channel(i + 2); float* output3 = top_blob.channel(i + 3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j = 0; for (; j + 7 < N; j = j + 8) { const float* vb = bottom_tm.channel(j / 8); const float* va = kernel_tm.channel(i / 8 + (i % 8) / 4); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(biasptr); __m256 _sum1 = _mm256_broadcast_ss(biasptr + 1); __m256 _sum2 = _mm256_broadcast_ss(biasptr + 2); __m256 _sum3 = _mm256_broadcast_ss(biasptr + 3); int k = 0; for (; k + 3 < L; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 va += 4; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 va += 4; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = 
_mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 va += 4; vb += 32; } for (; k < L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; int k = 0; for (; k + 7 < L; k = k + 8) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] * vb[n + 8]; sum1[n] += va[1] * vb[n + 8]; sum2[n] += va[2] * vb[n + 8]; sum3[n] += va[3] * vb[n + 8]; va += 4; sum0[n] += va[0] * vb[n + 16]; sum1[n] += va[1] * vb[n + 16]; sum2[n] += va[2] * vb[n + 16]; sum3[n] += va[3] * vb[n + 16]; va += 4; sum0[n] += va[0] * vb[n + 24]; sum1[n] += va[1] * vb[n + 24]; sum2[n] += va[2] * vb[n + 24]; sum3[n] += va[3] * vb[n + 24]; va += 4; sum0[n] += va[0] * vb[n + 32]; sum1[n] += va[1] * vb[n + 32]; sum2[n] += va[2] * vb[n + 32]; sum3[n] += va[3] * vb[n + 32]; va += 4; sum0[n] += va[0] * vb[n + 40]; sum1[n] += va[1] * vb[n + 40]; sum2[n] += va[2] * vb[n + 40]; sum3[n] += va[3] * vb[n + 40]; va += 4; sum0[n] += va[0] * vb[n + 
48]; sum1[n] += va[1] * vb[n + 48]; sum2[n] += va[2] * vb[n + 48]; sum3[n] += va[3] * vb[n + 48]; va += 4; sum0[n] += va[0] * vb[n + 56]; sum1[n] += va[1] * vb[n + 56]; sum2[n] += va[2] * vb[n + 56]; sum3[n] += va[3] * vb[n + 56]; va -= 28; } va += 32; vb += 64; } for (; k < L; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { const float* vb = bottom_tm.channel(j / 8 + j % 8); const float* va = kernel_tm.channel(i / 8 + (i % 8) / 4); #if __AVX__ __m128 _sum0_3 = _mm_loadu_ps(biasptr); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k = 0; for (; k + 3 < L; k = k + 4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va + 4); __m128 _va2 = _mm_loadu_ps(va + 8); __m128 _va3 = _mm_loadu_ps(va + 12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k < L; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0.f}; _mm_storeu_ps(output_sum0_3, 
_sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k = 0; k < L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __AVX__ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? bias[i] : 0.f; int j = 0; for (; j + 7 < N; j = j + 8) { const float* vb = bottom_tm.channel(j / 8); const float* va = kernel_tm.channel(i / 8 + (i % 8) / 4 + i % 4); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(&bias0); int k = 0; for (; k + 3 < L; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03 va += 4; vb += 32; } for (; k < L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 va += 1; vb += 8; } _mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; int k = 0; for (; k + 7 < L; k = k + 8) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n + 
8]; sum[n] += va[2] * vb[n + 16]; sum[n] += va[3] * vb[n + 24]; sum[n] += va[4] * vb[n + 32]; sum[n] += va[5] * vb[n + 40]; sum[n] += va[6] * vb[n + 48]; sum[n] += va[7] * vb[n + 56]; } va += 8; vb += 64; } for (; k < L; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n] + bias0; } #endif // __AVX__ output += 8; } for (; j < N; j++) { const float* vb = bottom_tm.channel(j / 8 + j % 8); const float* va = kernel_tm.channel(i / 8 + (i % 8) / 4 + i % 4); int k = 0; #if __AVX__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k + 3 < L; k += 4) { __m128 _p0 = _mm_loadu_ps(vb); vb += 4; __m128 _k0 = _mm_loadu_ps(va); va += 4; _sum0 = _mm_fmadd_ps(_p0, _k0, _sum0); } float output_sum0[4] = {0.f}; _mm_storeu_ps(output_sum0, _sum0); float sum0 = bias0 + output_sum0[0] + output_sum0[1] + output_sum0[2] + output_sum0[3]; #else float sum0 = bias0; #endif // __AVX__ for (; k < L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } } #else static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size) { const float* kernel = _kernel; // kernel memory packed 4 x 4 kernel_tm.create(4 * kernel_size, inch, outch / 4 + outch % 4); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const float* k0 = kernel + (p + 0) * inch * kernel_size; const float* k1 = kernel + (p + 1) * inch * kernel_size; const float* k2 = kernel + (p + 2) * inch * kernel_size; const float* k3 = kernel + (p + 3) * inch * kernel_size; float* ktmp = kernel_tm.channel(p / 4); for (int q = 0; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p = remain_outch_start; p < outch; p++) { const float* k0 = kernel + (p + 0) * inch * kernel_size; 
float* ktmp = kernel_tm.channel(p / 4 + p % 4); for (int q = 0; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } static void conv_im2col_sgemm_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // im2col Mat bottom_im2col(outw * outh, kernel_h * kernel_w * inch, elemsize, opt.workspace_allocator); { const int stride = kernel_h * kernel_w * outw * outh; float* ret = (float*)bottom_im2col; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const float* input = bottom_blob.channel(p); int retID = stride * p; for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i / 4); for (int q = 0; q < inch * kernel_size; q++) { #if __SSE__ _mm_storeu_ps(tmpptr, _mm_loadu_ps(img0)); #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; #endif // __SSE__ tmpptr += 4; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const 
float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i / 4 + i % 4); for (int q = 0; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i + 1); float* output2 = top_blob.channel(i + 2); float* output3 = top_blob.channel(i + 3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + i : zeros; int j = 0; for (; j + 3 < N; j = j + 4) { const float* vb = bottom_tm.channel(j / 4); const float* va = kernel_tm.channel(i / 4); #if __SSE__ __m128 _sum0 = _mm_set1_ps(biasptr[0]); __m128 _sum1 = _mm_set1_ps(biasptr[1]); __m128 _sum2 = _mm_set1_ps(biasptr[2]); __m128 _sum3 = _mm_set1_ps(biasptr[3]); int k = 0; for (; k + 3 < L; k = k + 4) { // k0 __m128 _vb = _mm_loadu_ps(vb); __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a00-a03) * k00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a00-a03) * k10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a00-a03) * k20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a00-a03) * k30 // k1 _vb = _mm_loadu_ps(vb + 4); _va0 = _mm_set1_ps(va[4]); _va1 = _mm_set1_ps(va[5]); _va2 = _mm_set1_ps(va[6]); _va3 = _mm_set1_ps(va[7]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a10-a13) * k01 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a10-a13) * k11 _sum2 = 
_mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a10-a13) * k21 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a10-a13) * k31 // k2 _vb = _mm_loadu_ps(vb + 8); _va0 = _mm_set1_ps(va[8]); _va1 = _mm_set1_ps(va[9]); _va2 = _mm_set1_ps(va[10]); _va3 = _mm_set1_ps(va[11]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a20-a23) * k02 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a20-a23) * k12 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a20-a23) * k22 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a20-a23) * k32 // k3 _vb = _mm_loadu_ps(vb + 12); _va0 = _mm_set1_ps(va[12]); _va1 = _mm_set1_ps(va[13]); _va2 = _mm_set1_ps(va[14]); _va3 = _mm_set1_ps(va[15]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a30-a33) * k03 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a30-a33) * k13 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a30-a33) * k23 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a30-a33) * k33 va += 16; vb += 16; } for (; k < L; k++) { // k0 __m128 _vb = _mm_loadu_ps(vb); __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a00-a03) * k00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a00-a03) * k10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a00-a03) * k20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a00-a03) * k30 va += 4; vb += 4; } _mm_storeu_ps(output0, _sum0); _mm_storeu_ps(output1, _sum1); _mm_storeu_ps(output2, _sum2); _mm_storeu_ps(output3, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; int k = 0; for (; k + 7 < L; k = k + 8) { for (int n = 0; n < 4; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] 
* vb[n + 4]; sum1[n] += va[1] * vb[n + 4]; sum2[n] += va[2] * vb[n + 4]; sum3[n] += va[3] * vb[n + 4]; va += 4; sum0[n] += va[0] * vb[n + 8]; sum1[n] += va[1] * vb[n + 8]; sum2[n] += va[2] * vb[n + 8]; sum3[n] += va[3] * vb[n + 8]; va += 4; sum0[n] += va[0] * vb[n + 12]; sum1[n] += va[1] * vb[n + 12]; sum2[n] += va[2] * vb[n + 12]; sum3[n] += va[3] * vb[n + 12]; va += 4; sum0[n] += va[0] * vb[n + 16]; sum1[n] += va[1] * vb[n + 16]; sum2[n] += va[2] * vb[n + 16]; sum3[n] += va[3] * vb[n + 16]; va += 4; sum0[n] += va[0] * vb[n + 20]; sum1[n] += va[1] * vb[n + 20]; sum2[n] += va[2] * vb[n + 20]; sum3[n] += va[3] * vb[n + 20]; va += 4; sum0[n] += va[0] * vb[n + 24]; sum1[n] += va[1] * vb[n + 24]; sum2[n] += va[2] * vb[n + 24]; sum3[n] += va[3] * vb[n + 24]; va += 4; sum0[n] += va[0] * vb[n + 28]; sum1[n] += va[1] * vb[n + 28]; sum2[n] += va[2] * vb[n + 28]; sum3[n] += va[3] * vb[n + 28]; va -= 28; } va += 32; vb += 32; } for (; k < L; k++) { for (int n = 0; n < 4; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __SSE__ output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { const float* vb = bottom_tm.channel(j / 4 + j % 4); const float* va = kernel_tm.channel(i / 4); #if __SSE__ __m128 _sum0_3 = _mm_loadu_ps(biasptr); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k = 0; for (; k + 3 < L; k = k + 4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va + 4); __m128 _va2 = _mm_loadu_ps(va + 8); __m128 _va3 = _mm_loadu_ps(va + 12); _sum0 = 
_mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); // sum0 += (k00-k30) * a00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1)); // sum1 += (k01-k31) * a10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2)); // sum2 += (k02-k32) * a20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3)); // sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k < L; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0)); // sum0 += (k00-k30) * a00 va += 4; vb += 1; } output0[0] = _sum0_3[0]; output1[0] = _sum0_3[1]; output2[0] = _sum0_3[2]; output3[0] = _sum0_3[3]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k = 0; k < L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __SSE__ output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; int j = 0; for (; j + 3 < N; j = j + 4) { const float* vb = bottom_tm.channel(j / 4); const float* va = kernel_tm.channel(i / 4 + i % 4); #if __SSE__ __m128 _sum0 = _mm_set1_ps(bias0); int k = 0; for (; k + 3 < L; k = k + 4) { // k0 __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); __m128 _vb0 = _mm_loadu_ps(vb); __m128 _vb1 = _mm_loadu_ps(vb + 4); __m128 _vb2 = _mm_loadu_ps(vb + 8); __m128 _vb3 = _mm_loadu_ps(vb + 12); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1)); // sum0 += (a10-a13) * k01 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2)); // sum0 += (a20-a23) * k02 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3)); // sum0 += (a30-a33) * k03 va += 4; vb += 16; } for (; k < L; k++) { // k0 __m128 _va0 = _mm_set1_ps(va[0]); __m128 _vb0 = _mm_loadu_ps(vb); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00 va += 1; vb += 4; } _mm_storeu_ps(output, _sum0); #else float sum[4] = {0}; int k = 0; for (; k + 3 < L; k = k + 4) { for (int n = 0; n < 4; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n + 4]; sum[n] += va[2] * vb[n + 8]; sum[n] += va[3] * vb[n + 12]; //sum[n] += va[4] * vb[n+16]; //sum[n] += va[5] * vb[n+20]; //sum[n] += va[6] * vb[n+24]; //sum[n] += va[7] * vb[n+28]; } va += 4; vb += 16; } for (; k < L; k++) { for (int n = 0; n < 4; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = sum[n] + bias0; } #endif // __SSE__ output += 4; } for (; j < N; j++) { const float* vb = bottom_tm.channel(j / 4 + j % 4); const float* va = kernel_tm.channel(i / 4 + i % 4); int k = 0; #if __SSE__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k + 3 < L; k += 4) { __m128 _p0 = _mm_loadu_ps(vb); __m128 _k0 = _mm_loadu_ps(va); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0)); va += 4; vb += 4; } float sum0 = bias0 + _sum0[0] + 
_sum0[1] + _sum0[2] + _sum0[3]; #else float sum0 = bias0; #endif // __SSE__ for (; k < L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } } #endif
HelloOpenMP_fix3.c
#include <stdio.h>
#include <omp.h>

/*
 * OpenMP hello-world: open a parallel region, then let exactly one thread
 * of the team print the team size and its own thread id.
 *
 * nthreads/thread_id are declared inside the parallel region so each thread
 * gets private copies; the `single` construct guarantees the two printfs run
 * once, on one (arbitrary) thread, so the pair of values printed is
 * consistent and the output is never interleaved.
 *
 * Returns 0 on success.
 */
int main(int argc, char *argv[])
{
    /* Command-line arguments are not used; silence -Wunused-parameter. */
    (void) argc;
    (void) argv;

#pragma omp parallel
    {
        int nthreads = omp_get_num_threads(); /* size of the current team */
        int thread_id = omp_get_thread_num(); /* this thread's id in the team */

#pragma omp single
        {
            printf("Goodbye slow serial world and Hello OpenMP!\n");
            printf("  I have %d thread(s) and my thread id is %d\n", nthreads, thread_id);
        }
    }
    return 0;
}
constitute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE % % C O O NN N SS T I T U U T E % % C O O N N N ESSS T I T U U T EEE % % C O O N NN SS T I T U U T E % % CCCC OOO N N SSSSS T IIIII T UUU T EEEEE % % % % % % MagickCore Methods to Consitute an Image % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* Typedef declaractions. */ typedef struct _ConstituteInfo { const char *caption, *comment, *dispose, *label; MagickBooleanType sync_from_exif, sync_from_tiff; MagickStatusType delay_flags; size_t delay; ssize_t ticks_per_second; } ConstituteInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. 
The data can be
%  char, short int, int, float, or double.  Float and double require the
%  pixels to be normalized [0..1], otherwise [0..QuantumRange].  For example,
%  to create a 640x480 image from unsigned red-green-blue character data,
%  use:
%
%      image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
%  The format of the ConstituteImage method is:
%
%      Image *ConstituteImage(const size_t columns,const size_t rows,
%        const char *map,const StorageType storage,const void *pixels,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: width in pixels of the image.
%
%    o rows: height in pixels of the image.
%
%    o map: This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, DoublePixel, FloatPixel,
%      IntegerPixel, LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Derive the image depth (in bits) from the caller's storage type.
  */
  switch (storage)
  {
    case CharPixel: image->depth=8*sizeof(unsigned char); break;
    case DoublePixel: image->depth=8*sizeof(double); break;
    case FloatPixel: image->depth=8*sizeof(float); break;
    case LongPixel: image->depth=8*sizeof(unsigned long); break;
    case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
    case ShortPixel: image->depth=8*sizeof(unsigned short); break;
    default: break;
  }
  /*
    Scan the channel map: enable the alpha trait and pick a colorspace
    (CMYK or grayscale) implied by the channel letters; a one-letter map
    defaults to grayscale.
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* Copy the caller's pixel array into the freshly allocated image. */
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImage() returns all the properties of an image or image sequence
%  except for the pixels.  It is much faster and consumes far less memory
%  than ReadImage().  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the PingImage method is:
%
%      Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Ping the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  PingStream() is the stream callback handed to ReadStream() below.  It
  ignores the pixel data entirely and reports `columns` pixels consumed,
  so the decoder walks the whole image without ever storing pixels.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Clone the caller's image info with ping mode enabled so coders decode
    properties only, then stream the image through the no-op PingStream.
  */
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e s                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImages() pings one or more images and returns them as an image list.
%
%  The format of the PingImages method is:
%
%      Image *PingImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  /*
    Expand any scene substitution in the filename; if the expansion differs
    from the original, the name embeds a scene template (e.g. image-%d.png)
    and each scene must be pinged individually.
  */
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      read_info=CloneImageInfo(image_info);
      sans=AcquireExceptionInfo();  /* discard SetImageInfo() diagnostics */
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      /* Ping each scene in [scene, scene+number_scenes); skip failures. */
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadImage() reads an image or image sequence from a file or file
%  handle.  The method returns a NULL if there is a memory shortage or if the
%  image cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadImage method is:
%
%      Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Read the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return MagickTrue if the security policy authorizes the coder for the
  given rights; otherwise record a PolicyError (and set errno to EPERM)
  in exception and return MagickFalse.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse)
    {
      errno=EPERM;
      (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
        "NotAuthorized","`%s'",coder);
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
  Populate constitute_info from image options: EXIF/TIFF property sync
  toggles, caption/comment/label templates, and the parsed "delay"
  geometry (rho = delay, sigma = ticks-per-second).
*/
static void InitializeConstituteInfo(const ImageInfo *image_info,
  ConstituteInfo *constitute_info)
{
  const char
    *option;

  memset(constitute_info,0,sizeof(*constitute_info));
  /*
    Property syncing is opt-out: enabled unless the option is explicitly
    set to a false value.
  */
  constitute_info->sync_from_exif=MagickTrue;
  constitute_info->sync_from_tiff=MagickTrue;
  option=GetImageOption(image_info,"exif:sync-image");
  if (IsStringFalse(option) != MagickFalse)
    constitute_info->sync_from_exif=MagickFalse;
  option=GetImageOption(image_info,"tiff:sync-image");
  if (IsStringFalse(option) != MagickFalse)
    constitute_info->sync_from_tiff=MagickFalse;
  constitute_info->caption=GetImageOption(image_info,"caption");
  constitute_info->comment=GetImageOption(image_info,"comment");
  constitute_info->label=GetImageOption(image_info,"label");
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      constitute_info->delay_flags=ParseGeometry(option,&geometry_info);
      if (constitute_info->delay_flags != NoValue)
        {
          /* Round to nearest integer tick count. */
          constitute_info->delay=floor(geometry_info.rho+0.5);
          if ((constitute_info->delay_flags & SigmaValue) != 0)
            constitute_info->ticks_per_second=CastDoubleToLong(floor(
              geometry_info.sigma+0.5));
        }
    }
}

/*
  Copy the image orientation from the "exif:Orientation" (preferred) or
  "tiff:Orientation" property into image->orientation, then delete the
  source property so it is not written back out of sync.
*/
static void SyncOrientationFromProperties(Image *image,
  ConstituteInfo *constitute_info,ExceptionInfo *exception)
{
  const char
    *orientation;

  orientation=(const char *) NULL;
  if (constitute_info->sync_from_exif != MagickFalse)
    {
      orientation=GetImageProperty(image,"exif:Orientation",exception);
      if (orientation != (const char *) NULL)
        {
          image->orientation=(OrientationType) StringToLong(orientation);
          (void) DeleteImageProperty(image,"exif:Orientation");
        }
    }
  /* TIFF is only consulted when EXIF did not supply an orientation. */
  if ((orientation == (const char *) NULL) &&
      (constitute_info->sync_from_tiff != MagickFalse))
    {
      orientation=GetImageProperty(image,"tiff:Orientation",exception);
      if (orientation != (const char *) NULL)
        {
          image->orientation=(OrientationType) StringToLong(orientation);
          (void) DeleteImageProperty(image,"tiff:Orientation");
        }
    }
}

/*
  Copy the X/Y resolution (and units) from EXIF properties, falling back
  to TIFF properties, into image->resolution/image->units; the consumed
  property set is deleted afterwards.  Resolutions may be rationals
  ("num/den") or "int,frac" pairs.
*/
static void SyncResolutionFromProperties(Image *image,
  ConstituteInfo *constitute_info, ExceptionInfo *exception)
{
  const char
    *resolution_x,
    *resolution_y,
    *resolution_units;

  MagickBooleanType
    used_tiff;

  resolution_x=(const char *) NULL;
  resolution_y=(const char *) NULL;
  resolution_units=(const char *) NULL;
  used_tiff=MagickFalse;
  if (constitute_info->sync_from_exif != MagickFalse)
    {
      resolution_x=GetImageProperty(image,"exif:XResolution",exception);
      resolution_y=GetImageProperty(image,"exif:YResolution",exception);
      /* Units only matter when both axes are present. */
      if ((resolution_x != (const char *) NULL) &&
          (resolution_y != (const char *) NULL))
        resolution_units=GetImageProperty(image,"exif:ResolutionUnit",
          exception);
    }
  if ((resolution_x == (const char *) NULL) &&
      (resolution_y == (const char *) NULL) &&
      (constitute_info->sync_from_tiff != MagickFalse))
    {
      resolution_x=GetImageProperty(image,"tiff:XResolution",exception);
      resolution_y=GetImageProperty(image,"tiff:YResolution",exception);
      if ((resolution_x != (const char *) NULL) &&
          (resolution_y != (const char *) NULL))
        {
          used_tiff=MagickTrue;
          resolution_units=GetImageProperty(image,"tiff:ResolutionUnit",
            exception);
        }
    }
  if ((resolution_x != (const char *) NULL) &&
      (resolution_y != (const char *) NULL))
    {
      GeometryInfo
        geometry_info;

      ssize_t
        option_type;

      /*
        "rho/sigma" rationals divide; "rho,sigma" pairs are interpreted
        as integer + fractional/1000 parts.
      */
      geometry_info.rho=image->resolution.x;
      geometry_info.sigma=1.0;
      (void) ParseGeometry(resolution_x,&geometry_info);
      if (geometry_info.sigma != 0)
        image->resolution.x=geometry_info.rho/geometry_info.sigma;
      if (strchr(resolution_x,',') != (char *) NULL)
        image->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
      geometry_info.rho=image->resolution.y;
      geometry_info.sigma=1.0;
      (void) ParseGeometry(resolution_y,&geometry_info);
      if (geometry_info.sigma != 0)
        image->resolution.y=geometry_info.rho/geometry_info.sigma;
      if (strchr(resolution_y,',') != (char *) NULL)
        image->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
      if (resolution_units != (char *) NULL)
        {
          option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
            resolution_units);
          if (option_type >= 0)
            image->units=(ResolutionType) option_type;
        }
      /* Delete only the property family we actually consumed. */
      if (used_tiff == MagickFalse)
        {
          (void) DeleteImageProperty(image,"exif:XResolution");
          (void) DeleteImageProperty(image,"exif:YResolution");
          (void) DeleteImageProperty(image,"exif:ResolutionUnit");
        }
      else
        {
          (void) DeleteImageProperty(image,"tiff:XResolution");
          (void) DeleteImageProperty(image,"tiff:YResolution");
          (void) DeleteImageProperty(image,"tiff:ResolutionUnit");
        }
    }
}

MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  ConstituteInfo
    constitute_info;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  (void) CopyMagickString(magick_filename,read_info->filename,
    MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  /* Only policy violations are surfaced to the caller from the probe. */
  if (sans_exception->severity == PolicyError)
    InheritException(exception,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Detect host byte order for raw formats. */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /*
        Probe the blob; if it is not seekable, spool it to a temporary
        file so the coder can seek freely.
      */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /*
        No native decoder: re-probe the filename (without an explicit
        magick) unless a read delegate exists for this format.
      */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      /* The delegate writes its output to a fresh temporary filename. */
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
      (GetImageListLength(image) != 1))
    {
      Image
        *clones;

      /* Apply the [n-m] scene subrange requested in the filename. */
      clones=CloneImages(image,read_info->scenes,exception);
      if (clones != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  InitializeConstituteInfo(read_info,&constitute_info);
  /*
    Post-process every frame: sync properties, apply extract geometry,
    caption/comment/label, delay/dispose options, and timestamps.
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property;

    const StringInfo
      *profile;

    /*
      NOTE(review): lazily-initialized function statics; first call is
      racy if ReadImage runs concurrently — confirm against the
      project's threading policy.  ("initalized" spelling is upstream.)
    */
    static const char
      *source_date_epoch = (const char *) NULL;

    static MagickBooleanType
      epoch_initalized = MagickFalse;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if ((*magick_path == '\0') && (*next->magick == '\0'))
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* Force lazy decoding of the profile-backed property families. */
    (void) GetImageProperty(next,"exif:*",exception);
    (void) GetImageProperty(next,"icc:*",exception);
    (void) GetImageProperty(next,"iptc:*",exception);
    (void) GetImageProperty(next,"xmp:*",exception);
    SyncOrientationFromProperties(next,&constitute_info,exception);
    SyncResolutionFromProperties(next,&constitute_info,exception);
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    if (constitute_info.caption != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.caption,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    if (constitute_info.comment != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.comment,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    if (constitute_info.label != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,
          constitute_info.label,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        RectangleInfo
          geometry;

        MagickStatusType
          flags;

        /*
          Extract geometry with an offset crops; with only a size it
          resizes instead.
        */
        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /*
      NOTE(review): the icc/icm result is immediately overwritten by the
      iptc lookup, so only the iptc/8bim value survives in 'profile' —
      and 'profile' is never read afterwards.  The calls appear kept for
      their side effect of materializing the profiles; confirm upstream.
    */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    if (epoch_initalized == MagickFalse)
      {
        /* SOURCE_DATE_EPOCH suppresses timestamps (reproducible builds). */
        source_date_epoch=getenv("SOURCE_DATE_EPOCH");
        epoch_initalized=MagickTrue;
      }
    if (source_date_epoch == (const char *) NULL)
      {
        char
          timestamp[MagickTimeExtent];

        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
          sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:modify",timestamp,exception);
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
          sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:create",timestamp,exception);
      }
    if (constitute_info.delay_flags != NoValue)
      {
        /* '>' caps the delay, '<' raises it, otherwise it is forced. */
        if ((constitute_info.delay_flags & GreaterValue) != 0)
          {
            if (next->delay > constitute_info.delay)
              next->delay=constitute_info.delay;
          }
        else
          if ((constitute_info.delay_flags & LessValue) != 0)
            {
              if (next->delay < constitute_info.delay)
                next->delay=constitute_info.delay;
            }
          else
            next->delay=constitute_info.delay;
        if ((constitute_info.delay_flags & SigmaValue) != 0)
          next->ticks_per_second=constitute_info.ticks_per_second;
      }
    if (constitute_info.dispose != (const char *) NULL)
      {
        ssize_t
          option_type;

        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          constitute_info.dispose);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"UnableToReadImageData");
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d I m a g e s                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadImages() reads one or more images and returns them as an image list.
%
%  The format of the ReadImage method is:
%
%      Image *ReadImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  /*
    Expand any printf-style scene template; a difference means an
    explicit scene sequence was requested.
  */
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /* Unreadable scenes are skipped, not fatal. */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d I n l i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  blob=Base64Decode(++p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  /*
    Derive a "data.<subtype>" pseudo-filename from the media type so
    BlobToImage can select the right coder.
  */
  for (p=content; (*p != '/') && (*p != '\0'); p++) ;
  if (*p != '\0')
    {
      char
        *q;

      ssize_t
        i;

      /*
        Extract media type.
      */
      if (LocaleNCompare(++p,"x-",2) == 0)
        p+=2;
      (void) strcpy(read_info->filename,"data.");
      q=read_info->filename+5;
      /* Bounded copy: at most MagickPathExtent-6 subtype characters. */
      for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
        *q++=(*p++);
      *q++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file handle.
%  If writing to a file is on disk, the name is defined by the filename member
%  of the image structure.  WriteImage() returns MagickFalse is there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
%
%  The format of the WriteImage method is:
%
%      MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /* Preserve the caller's filename so it can be restored on exit paths. */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  /* Re-query with the caller's exception so policy errors are reported. */
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(write_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* Detect host byte order for raw formats. */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  /*
    A bi-modal delegate can translate the original file directly to the
    output format, but only for a single, untainted frame.
  */
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse))
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      /*
        Probe the destination blob; if it is not seekable, write to a
        unique temporary file and copy it to the real target afterwards.
      */
      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,
        MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.
      */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder or delegate matched the requested magick; fall
            back through the image's own magick, then the filename
            extension, before giving up.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          if (sans_exception->severity == PolicyError)
            magick_info=GetMagickInfo(write_info->magick,exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowWriterException(FileOpenError,"UnableToWriteFile");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImages() writes an image sequence into one or more files.  While
%  WriteImage() can write an image sequence, it is limited to writing
%  the sequence into a single file using a format which supports multiple
%  frames.   WriteImages(), however, does not have this limitation, instead it
%  generates multiple output files if necessary (or when requested).
%  When
%  ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
%  to include a printf-style formatting string for the frame number (e.g.
%  "image%02d.png").
%
%  The format of the WriteImages method is:
%
%      MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o images: the image list.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag  "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /* An explicit filename overrides each frame's own filename. */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,
      MagickPathExtent);
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    if (p->scene >= next->scene)
      {
        ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /*
      Suppress the per-frame monitor while writing multiple frames; the
      sequence-level progress is reported below instead.
    */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /* With adjoin, the first WriteImage() call emits the whole list. */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
        /*
          NOTE(review): this loop is serial; the atomic appears to be
          defensive in case a monitor callback touches progress — confirm
          against upstream intent.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
FlopCounterFunctor.h
/**
 * @file FlopCounterFunctor.h
 *
 * @date 22 Jan 2018
 * @author tchipevn
 */

#pragma once

#include "autopas/pairwiseFunctors/Functor.h"
#include "autopas/utils/ArrayMath.h"

namespace autopas {
/**
 * This class helps in getting the number of performed floating point
 * operations. It is a functor that only calculated the amount of floating point
 * operations.
 * @todo this class currently is limited to the following case:
 *  - constant cutoff radius
 *  - constant amount of floating point operations for one kernel call (distance
 * < cutoff)
 * @tparam Particle
 * @tparam ParticleCell
 */
template <class Particle>
class FlopCounterFunctor : public Functor<Particle, FlopCounterFunctor<Particle>> {
 public:
  // Never selected by the auto-tuner; it only gathers statistics.
  bool isRelevantForTuning() override { return false; }

  bool allowsNewton3() override { return true; }

  bool allowsNonNewton3() override { return true; }

  bool isAppropriateClusterSize(unsigned int clusterSize, DataLayoutOption::Value dataLayout) const override {
    return dataLayout == DataLayoutOption::aos;  // no support for clusters yet, unless aos.
  }

  /**
   * constructor of FlopCounterFunctor
   * @param cutoffRadius the cutoff radius
   */
  explicit FlopCounterFunctor<Particle>(double cutoffRadius)
      : autopas::Functor<Particle, FlopCounterFunctor<Particle>>(cutoffRadius),
        _cutoffSquare(cutoffRadius * cutoffRadius),
        _distanceCalculations(0ul),
        _kernelCalls(0ul) {}

  // Counts one distance calculation per particle pair, and one kernel call
  // when the pair is within the cutoff. Dummy particles are skipped entirely.
  // Counters are atomics updated with relaxed ordering: only the final totals
  // matter, no inter-thread ordering is required.
  void AoSFunctor(Particle &i, Particle &j, bool newton3) override {
    if (i.isDummy() or j.isDummy()) {
      return;
    }
    auto dr = utils::ArrayMath::sub(i.getR(), j.getR());
    double dr2 = utils::ArrayMath::dot(dr, dr);
    _distanceCalculations.fetch_add(1, std::memory_order_relaxed);
    if (dr2 <= _cutoffSquare) {
      _kernelCalls.fetch_add(1, std::memory_order_relaxed);
    }
  }

  /**
   * @copydoc Functor::SoAFunctorSingle()
   */
  void SoAFunctorSingle(SoAView<typename Particle::SoAArraysType> soa, bool newton3) override {
    if (soa.getNumParticles() == 0) return;

    double *const __restrict x1ptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y1ptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z1ptr = soa.template begin<Particle::AttributeNames::posZ>();

    // Upper-triangle traversal (j > i): every unordered pair counted once.
    for (unsigned int i = 0; i < soa.getNumParticles(); ++i) {
      // Per-i local accumulators keep the atomic traffic out of the SIMD loop.
      unsigned long distanceCalculationsAcc = 0;
      unsigned long kernelCallsAcc = 0;
      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc)
      for (unsigned int j = i + 1; j < soa.getNumParticles(); ++j) {
        ++distanceCalculationsAcc;
        const double drx = x1ptr[i] - x1ptr[j];
        const double dry = y1ptr[i] - y1ptr[j];
        const double drz = z1ptr[i] - z1ptr[j];

        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;

        const double dr2 = drx2 + dry2 + drz2;

        if (dr2 <= _cutoffSquare) ++kernelCallsAcc;
      }
      _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
      _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
    }
  }

  /**
   * @copydoc Functor::SoAFunctorPair()
   */
  void SoAFunctorPair(SoAView<typename Particle::SoAArraysType> soa1, SoAView<typename Particle::SoAArraysType> soa2,
                      bool newton3) override {
    double *const __restrict x1ptr = soa1.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y1ptr = soa1.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z1ptr = soa1.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict x2ptr = soa2.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y2ptr = soa2.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z2ptr = soa2.template begin<Particle::AttributeNames::posZ>();

    // Full cross product soa1 x soa2; caller guarantees the views are disjoint.
    for (unsigned int i = 0; i < soa1.getNumParticles(); ++i) {
      unsigned long distanceCalculationsAcc = 0;
      unsigned long kernelCallsAcc = 0;
      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc)
      for (unsigned int j = 0; j < soa2.getNumParticles(); ++j) {
        ++distanceCalculationsAcc;
        const double drx = x1ptr[i] - x2ptr[j];
        const double dry = y1ptr[i] - y2ptr[j];
        const double drz = z1ptr[i] - z2ptr[j];

        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;

        const double dr2 = drx2 + dry2 + drz2;

        if (dr2 <= _cutoffSquare) {
          ++kernelCallsAcc;
        }
      }
      _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
      _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
    }
  }

  /**
   * @copydoc Functor::SoAFunctorVerlet()
   */
  void SoAFunctorVerlet(SoAView<typename Particle::SoAArraysType> soa, const size_t indexFirst,
                        const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList,
                        bool newton3) override {
    auto numParts = soa.getNumParticles();

    if (numParts == 0) return;

    double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();

    const size_t listSizeI = neighborList.size();
    const size_t *const __restrict currentList = neighborList.data();

    // this is a magic number, that should correspond to at least
    // vectorization width*N have testet multiple sizes:
    // 4: small speedup compared to AoS
    // 8: small speedup compared to AoS
    // 12: small but best speedup compared to Aos
    // 16: smaller speedup
    // in theory this is a variable, we could auto-tune over...
#ifdef __AVX512F__
    // use a multiple of 8 for avx
    const size_t vecsize = 16;
#else
    // for everything else 12 is faster
    const size_t vecsize = 12;
#endif
    size_t joff = 0;

    // if the size of the verlet list is larger than the given size vecsize,
    // we will use a vectorized version.
    if (listSizeI >= vecsize) {
      alignas(64) std::array<double, vecsize> xtmp{}, ytmp{}, ztmp{}, xArr{}, yArr{}, zArr{};
      // broadcast of the position of particle i
      for (size_t tmpj = 0; tmpj < vecsize; tmpj++) {
        xtmp[tmpj] = xptr[indexFirst];
        ytmp[tmpj] = yptr[indexFirst];
        ztmp[tmpj] = zptr[indexFirst];
      }
      // loop over the verlet list from 0 to x*vecsize
      // NOTE(review): unlike the remainder loop below, this vectorized path
      // does not skip j == indexFirst — presumably the neighbor list never
      // contains the particle itself; verify against the list builder.
      for (; joff < listSizeI - vecsize + 1; joff += vecsize) {
        unsigned long distanceCalculationsAcc = 0;
        unsigned long kernelCallsAcc = 0;
        // in each iteration we calculate the interactions of particle i with
        // vecsize particles in the neighborlist of particle i starting at
        // particle joff

        // gather position of particle j
#pragma omp simd safelen(vecsize)
        for (size_t tmpj = 0; tmpj < vecsize; tmpj++) {
          xArr[tmpj] = xptr[currentList[joff + tmpj]];
          yArr[tmpj] = yptr[currentList[joff + tmpj]];
          zArr[tmpj] = zptr[currentList[joff + tmpj]];
        }

        // do omp simd with reduction of the interaction
#pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc) safelen(vecsize)
        for (size_t j = 0; j < vecsize; j++) {
          ++distanceCalculationsAcc;
          const double drx = xtmp[j] - xArr[j];
          const double dry = ytmp[j] - yArr[j];
          const double drz = ztmp[j] - zArr[j];

          const double drx2 = drx * drx;
          const double dry2 = dry * dry;
          const double drz2 = drz * drz;

          const double dr2 = drx2 + dry2 + drz2;

          // branch-free form keeps the loop vectorizable
          const unsigned long mask = (dr2 <= _cutoffSquare) ? 1 : 0;
          kernelCallsAcc += mask;
        }
        _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
        _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
      }
    }

    unsigned long distanceCalculationsAcc = 0;
    unsigned long kernelCallsAcc = 0;
    // this loop goes over the remainder and uses no optimizations
    for (size_t jNeighIndex = joff; jNeighIndex < listSizeI; ++jNeighIndex) {
      size_t j = neighborList[jNeighIndex];
      if (indexFirst == j) continue;

      ++distanceCalculationsAcc;
      const double drx = xptr[indexFirst] - xptr[j];
      const double dry = yptr[indexFirst] - yptr[j];
      const double drz = zptr[indexFirst] - zptr[j];

      const double drx2 = drx * drx;
      const double dry2 = dry * dry;
      const double drz2 = drz * drz;

      const double dr2 = drx2 + dry2 + drz2;

      if (dr2 <= _cutoffSquare) {
        ++kernelCallsAcc;
      }
    }
    _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
    _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
  }

  // Rough GPU estimate: counts size*size pair interactions as both distance
  // calculations and kernel calls (no cutoff check is performed here).
  void CudaFunctor(CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle, bool newton3) override {
#if defined(AUTOPAS_CUDA)
    // estimate flops on GPU
    size_t size = device_handle.template get<Particle::AttributeNames::posX>().size();
    _distanceCalculations += size * size;
    _kernelCalls += size * size;
#else
    utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!");
#endif
  }

  // Two-SoA variant of the GPU estimate: size1*size2 pair interactions.
  void CudaFunctor(CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle1,
                   CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle2, bool newton3) override {
#if defined(AUTOPAS_CUDA)
    // estimate flops on GPU
    size_t size1 = device_handle1.template get<Particle::AttributeNames::posX>().size();
    size_t size2 = device_handle2.template get<Particle::AttributeNames::posX>().size();
    _distanceCalculations += size1 * size2;
    _kernelCalls += size1 * size2;
#else
    utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!");
#endif
  }

  /**
   * @copydoc Functor::deviceSoALoader
   */
  void deviceSoALoader(::autopas::SoA<typename Particle::SoAArraysType> &soa,
                       CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle) override {
#if defined(AUTOPAS_CUDA)
    size_t size = soa.getNumParticles();
    if (size == 0) return;

    // copy positions and forces host -> device
    device_handle.template get<Particle::AttributeNames::posX>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::posX>());
    device_handle.template get<Particle::AttributeNames::posY>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::posY>());
    device_handle.template get<Particle::AttributeNames::posZ>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::posZ>());

    device_handle.template get<Particle::AttributeNames::forceX>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::forceX>());
    device_handle.template get<Particle::AttributeNames::forceY>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::forceY>());
    device_handle.template get<Particle::AttributeNames::forceZ>().copyHostToDevice(
        size, soa.template begin<Particle::AttributeNames::forceZ>());
#else
    utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!");
#endif
  }

  /**
   * @copydoc Functor::deviceSoAExtractor
   */
  void deviceSoAExtractor(::autopas::SoA<typename Particle::SoAArraysType> &soa,
                          CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle) override {
#if defined(AUTOPAS_CUDA)
    size_t size = soa.getNumParticles();
    if (size == 0) return;

    // only forces are copied back device -> host
    device_handle.template get<Particle::AttributeNames::forceX>().copyDeviceToHost(
        size, soa.template begin<Particle::AttributeNames::forceX>());
    device_handle.template get<Particle::AttributeNames::forceY>().copyDeviceToHost(
        size, soa.template begin<Particle::AttributeNames::forceY>());
    device_handle.template get<Particle::AttributeNames::forceZ>().copyDeviceToHost(
        size, soa.template begin<Particle::AttributeNames::forceZ>());
#else
    utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!");
#endif
  }

  /**
   * @copydoc Functor::getNeededAttr()
   */
  constexpr static std::array<typename Particle::AttributeNames, 3> getNeededAttr() {
    return std::array<typename Particle::AttributeNames, 3>{
        Particle::AttributeNames::posX, Particle::AttributeNames::posY, Particle::AttributeNames::posZ};
  }

  /**
   * @copydoc Functor::getNeededAttr(std::false_type)
   */
  constexpr static std::array<typename Particle::AttributeNames, 3> getNeededAttr(std::false_type) {
    return getNeededAttr();
  }

  /**
   * @copydoc Functor::getComputedAttr()
   */
  constexpr static std::array<typename Particle::AttributeNames, 0> getComputedAttr() {
    return std::array<typename Particle::AttributeNames, 0>{/*Nothing*/};
  }

  /**
   * get the hit rate of the pair-wise interaction, i.e. the ratio of the number
   * of kernel calls compared to the number of distance calculations
   * NOTE(review): if no distance calculation has been counted yet this divides
   * zero by zero (NaN) — callers should query only after a traversal.
   * @return the hit rate
   */
  double getHitRate() { return static_cast<double>(_kernelCalls) / static_cast<double>(_distanceCalculations); }

  /**
   * get the total number of flops
   * @param numFlopsPerKernelCall
   * @return
   */
  double getFlops(unsigned long numFlopsPerKernelCall) const {
    const double distFlops = numFlopsPerDistanceCalculation * static_cast<double>(_distanceCalculations);
    const double kernFlops = numFlopsPerKernelCall * static_cast<double>(_kernelCalls);
    return distFlops + kernFlops;
  }

  /**
   * get the number of calculated distance operations
   * @return
   */
  unsigned long getDistanceCalculations() const { return _distanceCalculations; }

  /**
   * get the number of kernel calls, i.e. the number of pairs of particles with
   * a distance not larger than the cutoff
   * @return
   */
  unsigned long getKernelCalls() const { return _kernelCalls; }

  /**
   * number of flops for one distance calculation.
   * 3 sub + 3 square + 2 add
   */
  static constexpr double numFlopsPerDistanceCalculation = 8.0;

 private:
  double _cutoffSquare;
  // relaxed atomics: concurrently incremented by all traversal threads
  std::atomic<size_t> _distanceCalculations, _kernelCalls;
};

}  // namespace autopas
GB_unaryop__minv_uint16_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Fixes belong in the generator template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint16_uint32
// op(A') function:  GB_tran__minv_uint16_uint32

// C type:   uint16_t
// A type:   uint32_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 16)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 16) ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij)): declares aij from Ax[pA], casts it to the C type
// as a local named x, then applies the operator into Cx[pC].
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries; iterations are independent, so the
// loop is statically scheduled across nthreads.
GrB_Info GB_unop__minv_uint16_uint32
(
    uint16_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Phase 2 of the transpose is implemented by the included template, which
// expands against the GB_* macros defined above.
GrB_Info GB_tran__minv_uint16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SB_TV_core.c
/*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC

Copyright 2017 Daniil Kazantsev
Copyright 2017 Srikanth Nagella, Edoardo Pasca

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include "SB_TV_core.h"

/* C-OMP implementation of Split Bregman - TV denoising-regularisation model (2D/3D) [1]
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambda - regularisation parameter
* 3. Number of iterations [OPTIONAL parameter]
* 4. eplsilon - tolerance constant [OPTIONAL parameter]
* 5. TV-type: 'iso' or 'l1' [OPTIONAL parameter]
*
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* [1]. Goldstein, T. and Osher, S., 2009. The split Bregman method for L1-regularized problems. SIAM journal on imaging sciences, 2(2), pp.323-343.
*/

/* Entry point: runs the Split Bregman TV iterations on a 2D image (dimZ==1)
 * or a 3D volume. Writes the result into Output and fills
 * infovector[0] = iterations performed, infovector[1] = reached tolerance.
 * Returns 0 on success, -1 if a work buffer could not be allocated. */
float SB_TV_CPU_main(float *Input, float *Output, float *infovector, float mu, int iter, float epsil, int methodTV, int dimX, int dimY, int dimZ)
{
    int ll = 0;
    long j, DimTotal;
    float re, re1, lambda;
    re = 0.0f; re1 = 0.0f;
    int count = 0;
    mu = 1.0f/mu;
    lambda = 2.0f*mu;

    float *Output_prev=NULL, *Dx=NULL, *Dy=NULL, *Bx=NULL, *By=NULL;
    /* FIX: compute the total size in long arithmetic; the previous
       (long)(dimX*dimY*dimZ) multiplied in int and could overflow for
       large volumes before the cast. */
    DimTotal = (long)(dimX)*(long)(dimY)*(long)(dimZ);
    Output_prev = calloc(DimTotal, sizeof(float));
    Dx = calloc(DimTotal, sizeof(float));
    Dy = calloc(DimTotal, sizeof(float));
    Bx = calloc(DimTotal, sizeof(float));
    By = calloc(DimTotal, sizeof(float));
    /* FIX: the calloc results were never checked; a failed allocation led to
       a NULL dereference inside the solvers. free(NULL) is a no-op. */
    if (Output_prev == NULL || Dx == NULL || Dy == NULL || Bx == NULL || By == NULL) {
        free(Output_prev); free(Dx); free(Dy); free(Bx); free(By);
        infovector[0] = 0.0f; infovector[1] = 0.0f;
        return -1.0f;
    }

    if (dimZ == 1) {
        /* 2D case */
        copyIm(Input, Output, (long)(dimX), (long)(dimY), 1l); /*initialize */

        /* begin outer SB iterations */
        for(ll=0; ll<iter; ll++) {

            /* storing old estimate */
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), 1l);

            /* perform two GS iterations (normally 2 is enough for the convergence) */
            gauss_seidel2D(Output, Input, Output_prev, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY), lambda, mu);
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), 1l);
            /*GS iteration */
            gauss_seidel2D(Output, Input, Output_prev, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY), lambda, mu);

            /* TV-related step */
            if (methodTV == 1) updDxDy_shrinkAniso2D(Output, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY), lambda);
            else updDxDy_shrinkIso2D(Output, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY), lambda);

            /* update for Bregman variables */
            updBxBy2D(Output, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY));

            /* check early stopping criteria if epsilon not equal zero */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++)
                {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                /* FIX: guard the division — an all-zero estimate produced
                   NaN/Inf in the residual. */
                re = (re1 > 0.0f) ? sqrtf(re)/sqrtf(re1) : 0.0f;
                /* stop if the norm residual is less than the tolerance EPS */
                if (re < epsil) count++;
                if (count > 3) break;
            }
        }
    }
    else {
        /* 3D case */
        float *Dz=NULL, *Bz=NULL;

        Dz = calloc(DimTotal, sizeof(float));
        Bz = calloc(DimTotal, sizeof(float));
        /* FIX: check the extra 3D buffers as well */
        if (Dz == NULL || Bz == NULL) {
            free(Dz); free(Bz);
            free(Output_prev); free(Dx); free(Dy); free(Bx); free(By);
            infovector[0] = 0.0f; infovector[1] = 0.0f;
            return -1.0f;
        }

        copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ)); /*initialize */

        /* begin outer SB iterations */
        for(ll=0; ll<iter; ll++) {

            /* storing old estimate */
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* perform two GS iterations (normally 2 is enough for the convergence) */
            gauss_seidel3D(Output, Input, Output_prev, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ), lambda, mu);
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            /*GS iteration */
            gauss_seidel3D(Output, Input, Output_prev, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ), lambda, mu);

            /* TV-related step */
            if (methodTV == 1) updDxDyDz_shrinkAniso3D(Output, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ), lambda);
            else updDxDyDz_shrinkIso3D(Output, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ), lambda);

            /* update for Bregman variables */
            updBxByBz3D(Output, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* check early stopping criteria if epsilon not equal zero */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++)
                {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                /* FIX: guard the division (see 2D branch) */
                re = (re1 > 0.0f) ? sqrtf(re)/sqrtf(re1) : 0.0f;
                /* stop if the norm residual is less than the tolerance EPS */
                if (re < epsil) count++;
                if (count > 3) break;
            }
        }
        free(Dz); free(Bz);
    }

    free(Output_prev); free(Dx); free(Dy); free(Bx); free(By);

    /*adding info into info_vector */
    infovector[0] = (float)(ll); /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re; /* reached tolerance */
    return 0;
}

/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/

/* One Gauss-Seidel sweep for the quadratic subproblem (2D), using symmetric
 * (Neumann) boundary conditions. Reads the previous estimate U_prev and the
 * split/Bregman variables, writes the updated estimate into U. */
float gauss_seidel2D(float *U, float *A, float *U_prev, float *Dx, float *Dy, float *Bx, float *By, long dimX, long dimY, float lambda, float mu)
{
    float sum, normConst;
    long i,j,i1,i2,j1,j2,index;
    normConst = 1.0f/(mu + 4.0f*lambda);
#pragma omp parallel for shared(U) private(index,i,j,i1,i2,j1,j2,sum)
    for(i=0; i<dimX; i++) {
        /* symmetric boundary conditions (Neuman) */
        i1 = i+1; if (i1 == dimX) i1 = i-1;
        i2 = i-1; if (i2 < 0) i2 = i+1;
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions (Neuman) */
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            index = j*dimX+i;

            /* divergence of (D - B) plus 4-neighbour average of U_prev */
            sum = Dx[j*dimX+i2] - Dx[index] + Dy[j2*dimX+i] - Dy[index] - Bx[j*dimX+i2] + Bx[index] - By[j2*dimX+i] + By[index];
            sum += U_prev[j*dimX+i1] + U_prev[j*dimX+i2] + U_prev[j1*dimX+i] + U_prev[j2*dimX+i];
            sum *= lambda;
            sum += mu*A[index];
            U[index] = normConst*sum;
        }}
    return *U;
}

/* Anisotropic (l1) shrinkage of the split variables Dx, Dy (2D):
 * soft-thresholds each gradient component independently by 1/lambda. */
float updDxDy_shrinkAniso2D(float *U, float *Dx, float *Dy, float *Bx, float *By, long dimX, long dimY, float lambda)
{
    long i,j,i1,j1,index;
    float val1, val11, val2, val22, denom_lam;
    denom_lam = 1.0f/lambda;
#pragma omp parallel for shared(U,denom_lam) private(index,i,j,i1,j1,val1,val11,val2,val22)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions (Neuman) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            index = j*dimX+i;

            val1 = (U[j*dimX+i1] - U[index]) + Bx[index];
            val2 = (U[j1*dimX+i] - U[index]) + By[index];

            /* FIX: use the single-precision fabsf instead of fabs — this is
               all-float code, the double-precision calls only forced
               float<->double conversions. */
            val11 = fabsf(val1) - denom_lam; if (val11 < 0) val11 = 0;
            val22 = fabsf(val2) - denom_lam; if (val22 < 0) val22 = 0;

            if (val1 !=0) Dx[index] = (val1/fabsf(val1))*val11; else Dx[index] = 0;
            if (val2 !=0) Dy[index] = (val2/fabsf(val2))*val22; else Dy[index] = 0;
        }}
    return 1;
}

/* Isotropic shrinkage of the split variables Dx, Dy (2D): shrinks the
 * gradient magnitude by 1/lambda, preserving its direction. */
float updDxDy_shrinkIso2D(float *U, float *Dx, float *Dy, float *Bx, float *By, long dimX, long dimY, float lambda)
{
    long i,j,i1,j1,index;
    float val1, val11, val2, denom, denom_lam;
    denom_lam = 1.0f/lambda;
#pragma omp parallel for shared(U,denom_lam) private(index,i,j,i1,j1,val1,val11,val2,denom)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions (Neuman) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            index = j*dimX+i;

            val1 = (U[j*dimX+i1] - U[index]) + Bx[index];
            val2 = (U[j1*dimX+i] - U[index]) + By[index];

            /* FIX: sqrtf instead of the double-precision sqrt (float data) */
            denom = sqrtf(val1*val1 + val2*val2);

            val11 = (denom - denom_lam); if (val11 < 0) val11 = 0.0f;

            if (denom != 0.0f) {
                Dx[index] = val11*(val1/denom);
                Dy[index] = val11*(val2/denom);
            }
            else {
                Dx[index] = 0;
                Dy[index] = 0;
            }
        }}
    return 1;
}

/* Bregman variable update (2D): B += grad(U) - D for each component. */
float updBxBy2D(float *U, float *Dx, float *Dy, float *Bx, float *By, long dimX, long dimY)
{
    long i,j,i1,j1,index;
#pragma omp parallel for shared(U) private(index,i,j,i1,j1)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions (Neuman) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            index = j*dimX+i;

            Bx[index] += (U[j*dimX+i1] - U[index]) - Dx[index];
            By[index] += (U[j1*dimX+i] - U[index]) - Dy[index];
        }}
    return 1;
}

/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/

/* One Gauss-Seidel sweep for the quadratic subproblem (3D); see the 2D
 * variant — identical scheme extended with the z-direction. */
float gauss_seidel3D(float *U, float *A, float *U_prev, float *Dx, float *Dy, float *Dz, float *Bx, float *By, float *Bz, long dimX, long dimY, long dimZ, float lambda, float mu)
{
    float normConst, d_val, b_val, sum;
    long i,j,i1,i2,j1,j2,k,k1,k2,index;
    normConst = 1.0f/(mu + 6.0f*lambda);
#pragma omp parallel for shared(U) private(index,i,j,i1,i2,j1,j2,k,k1,k2,d_val,b_val,sum)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                /* symmetric boundary conditions (Neuman) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                i2 = i-1; if (i2 < 0) i2 = i+1;
                j1 = j+1; if (j1 == dimY) j1 = j-1;
                j2 = j-1; if (j2 < 0) j2 = j+1;
                k1 = k+1; if (k1 == dimZ) k1 = k-1;
                k2 = k-1; if (k2 < 0) k2 = k+1;
                index = (dimX*dimY)*k + j*dimX+i;

                d_val = Dx[(dimX*dimY)*k + j*dimX+i2] - Dx[index] + Dy[(dimX*dimY)*k + j2*dimX+i] - Dy[index] + Dz[(dimX*dimY)*k2 + j*dimX+i] - Dz[index];
                b_val = -Bx[(dimX*dimY)*k + j*dimX+i2] + Bx[index] - By[(dimX*dimY)*k + j2*dimX+i] + By[index] - Bz[(dimX*dimY)*k2 + j*dimX+i] + Bz[index];
                sum = d_val + b_val;
                sum += U_prev[(dimX*dimY)*k + j*dimX+i1] + U_prev[(dimX*dimY)*k + j*dimX+i2] + U_prev[(dimX*dimY)*k + j1*dimX+i] + U_prev[(dimX*dimY)*k + j2*dimX+i] + U_prev[(dimX*dimY)*k1 + j*dimX+i] + U_prev[(dimX*dimY)*k2 + j*dimX+i];
                sum *= lambda;
                sum += mu*A[index];
                U[index] = normConst*sum;
            }}}
    return *U;
}

/* Anisotropic (l1) shrinkage (3D): soft-thresholds each of the three
 * gradient components independently by 1/lambda. */
float updDxDyDz_shrinkAniso3D(float *U, float *Dx, float *Dy, float *Dz, float *Bx, float *By, float *Bz, long dimX, long dimY, long dimZ, float lambda)
{
    long i,j,i1,j1,k,k1,index;
    float val1, val11, val2, val22, val3, val33, denom_lam;
    denom_lam = 1.0f/lambda;
#pragma omp parallel for shared(U,denom_lam) private(index,i,j,i1,j1,k,k1,val1,val11,val2,val22,val3,val33)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                j1 = j+1; if (j1 == dimY) j1 = j-1;
                k1 = k+1; if (k1 == dimZ) k1 = k-1;

                val1 = (U[(dimX*dimY)*k + j*dimX+i1] - U[index]) + Bx[index];
                val2 = (U[(dimX*dimY)*k + j1*dimX+i] - U[index]) + By[index];
                val3 = (U[(dimX*dimY)*k1 + j*dimX+i] - U[index]) + Bz[index];

                /* FIX: fabsf instead of fabs (all-float code) */
                val11 = fabsf(val1) - denom_lam; if (val11 < 0.0f) val11 = 0.0f;
                val22 = fabsf(val2) - denom_lam; if (val22 < 0.0f) val22 = 0.0f;
                val33 = fabsf(val3) - denom_lam; if (val33 < 0.0f) val33 = 0.0f;

                if (val1 !=0.0f) Dx[index] = (val1/fabsf(val1))*val11; else Dx[index] = 0.0f;
                if (val2 !=0.0f) Dy[index] = (val2/fabsf(val2))*val22; else Dy[index] = 0.0f;
                if (val3 !=0.0f) Dz[index] = (val3/fabsf(val3))*val33; else Dz[index] = 0.0f;
            }}}
    return 1;
}

/* Isotropic shrinkage (3D): shrinks the gradient magnitude by 1/lambda,
 * preserving its direction. */
float updDxDyDz_shrinkIso3D(float *U, float *Dx, float *Dy, float *Dz, float *Bx, float *By, float *Bz, long dimX, long dimY, long dimZ, float lambda)
{
    long i,j,i1,j1,k,k1,index;
    float val1, val11, val2, val3, denom, denom_lam;
    denom_lam = 1.0f/lambda;
#pragma omp parallel for shared(U,denom_lam) private(index,denom,i,j,i1,j1,k,k1,val1,val11,val2,val3)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                j1 = j+1; if (j1 == dimY) j1 = j-1;
                k1 = k+1; if (k1 == dimZ) k1 = k-1;

                val1 = (U[(dimX*dimY)*k + j*dimX+i1] - U[index]) + Bx[index];
                val2 = (U[(dimX*dimY)*k + j1*dimX+i] - U[index]) + By[index];
                val3 = (U[(dimX*dimY)*k1 + j*dimX+i] - U[index]) + Bz[index];

                /* FIX: sqrtf instead of sqrt (float data) */
                denom = sqrtf(val1*val1 + val2*val2 + val3*val3);

                val11 = (denom - denom_lam); if (val11 < 0) val11 = 0.0f;

                if (denom != 0.0f) {
                    Dx[index] = val11*(val1/denom);
                    Dy[index] = val11*(val2/denom);
                    Dz[index] = val11*(val3/denom);
                }
                else {
                    Dx[index] = 0;
                    Dy[index] = 0;
                    Dz[index] = 0;
                }
            }}}
    return 1;
}

/* Bregman variable update (3D): B += grad(U) - D for each component. */
float updBxByBz3D(float *U, float *Dx, float *Dy, float *Dz, float *Bx, float *By, float *Bz, long dimX, long dimY, long dimZ)
{
    long i,j,k,i1,j1,k1,index;
#pragma omp parallel for shared(U) private(index,i,j,k,i1,j1,k1)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                j1 = j+1; if (j1 == dimY) j1 = j-1;
                k1 = k+1; if (k1 == dimZ) k1 = k-1;

                Bx[index] += (U[(dimX*dimY)*k + j*dimX+i1] - U[index]) - Dx[index];
                By[index] += (U[(dimX*dimY)*k + j1*dimX+i] - U[index]) - Dy[index];
                Bz[index] += (U[(dimX*dimY)*k1 + j*dimX+i] - U[index]) - Dz[index];
            }}}
    return 1;
}
_grave.c
//HESAFF void detectKeypointsList(int num_fpaths, // char** image_fpath_list, // float** keypoints_array, // uint8** descriptors_array, // int* length_array, // __HESAFF_PARAM_SIGNATURE_ARGS__ // ) //{ // assert(0); // do not use // // Maybe use this implimentation instead to be more similar to the way // // pyhesaff calls this library? // int index; // #pragma omp parallel for private(index) // for(index = 0; index < num_fpaths; ++index) // { // char* image_filename = image_fpath_list[index]; // AffineHessianDetector* detector = // new_hesaff_fpath(image_filename, __HESAFF_PARAM_CALL_ARGS__); // detector->DBG_params(); // int length = detector->detect(); // length_array[index] = length; // // TODO: shouldn't python be doing this allocation? // keypoints_array[index] = new float[length * KPTS_DIM]; // descriptors_array[index] = new uint8[length * DESC_DIM]; // exportArrays(detector, length, keypoints_array[index], descriptors_array[index]); // delete detector; // } //} //void detectKeypoints(char* image_filename, // float** keypoints, // uint8** descriptors, // int* length, // __HESAFF_PARAM_SIGNATURE_ARGS__ // ) //{ // AffineHessianDetector* detector = new_hesaff_fpath(image_filename, __HESAFF_PARAM_CALL_ARGS__); // detector->DBG_params(); // *length = detector->detect(); // // TODO: shouldn't python be doing this allocation? 
// *keypoints = new float[(*length)*KPTS_DIM]; // *descriptors = new uint8[(*length)*DESC_DIM]; // detector->exportArrays((*length), *keypoints, *descriptors); // //may need to add support for "use_adaptive_scale" and "nogravity_hack" here (needs translation from Python to C++ first) // delete detector; //} //typedef void*(*allocer_t)(int, int*); //const HESAFF char* cmake_build_type() //{ // // References: // // http://stackoverflow.com/questions/14883853/ctypes-return-a-string-from-c-function // char *build_type = (char*) malloc(sizeof(char) * (10 + 1)); // #ifdef CMAKE_BUILD_TYPE // //char hello[] = CMAKE_BUILD_TYPE // strcpy(build_type, "testb1"); // #else // strcpy(build_type, "testb2"); // #endif // return build_type; //} //HESAFF char* free_char(char* malloced_char) //{ // // need to free anything malloced here // free(malloced_char); //}
libtorch_utils.h
#ifndef libtorch_UTILS
#define libtorch_UTILS
/*
	Copyright (c) 2019, Sanaxen
	All rights reserved.

	Use of this source code is governed by a MIT license that can be found
	in the LICENSE file.
*/
#include <random>

#include "util/utils.h"
#ifdef USE_IMAGE_UTIL
#include "util/Image.hpp"
#endif

namespace cpp_torch
{
	// Intentional no-op placeholder.
	inline void nop()
	{
		// do nothing
	}

	/**
	 * error exception class
	 * Prints the message to stderr at construction time, then carries it
	 * for what().
	 **/
	class error_exception : public std::exception {
	public:
		explicit error_exception(const std::string &msg) : msg_(msg) {
			fprintf(stderr, "ERROR:%s\n", msg.c_str());
			fflush(stderr);
		}
		const char *what() const throw() override { return msg_.c_str(); }
	private:
		std::string msg_;
	};

	// Total number of elements of a tensor (product of its sizes);
	// returns 0 for a zero-dimensional tensor.
	inline size_t tensor_flatten_size(torch::Tensor& t)
	{
		size_t s = 1;
		if (t.dim())
		{
			for (int i = 0; i < t.sizes().size(); i++)
			{
				s *= t.sizes()[i];
			}
			return s;
		}
		return 0;
	}

	// Debug helper: prints "<label> dim:<d> s0 x s1 x ... x sn" to stdout.
	// NOTE(review): "%d" with t.sizes()[i] (int64_t) is a format-specifier
	// mismatch on most platforms — should be "%lld"/PRId64; verify.
	inline void dump_dim(const std::string & s, torch::Tensor& t)
	{
		printf("%s dim:%d ", s.c_str(), t.dim());
		if (t.dim())
		{
			for (int i = 0; i < t.sizes().size() - 1; i++)
			{
				printf("%d x", t.sizes()[i]);
			}
			printf("%d\n", t.sizes()[t.sizes().size() - 1]);
		}
		fflush(stdout);
	}
	// Overload for C strings; delegates to the std::string version.
	inline void dump_dim(char* s, torch::Tensor& t)
	{
		dump_dim(std::string(s), t);
	}

	// Converts class labels to one-hot vectors of length max_label.
	// NOTE(review): no bounds check — labels[i] >= max_label would write out
	// of range of t; assumes labels are already validated upstream.
	void label2vec(const std::vector<tiny_dnn::label_t>& labels, std::vector<tiny_dnn::vec_t>& vec, int max_label)
	{
		vec.clear();
		vec.resize(labels.size());
#pragma omp parallel for
		for (int i = 0; i < labels.size(); i++)
		{
			tiny_dnn::vec_t t(max_label, 0);
			t[labels[i]] = 1.0;
			vec[i] = t;
		}
	}

	// Converts each element of vec to a torch::Tensor (parallel over elements).
	// NOTE(review): `torch::Tensor& tensor = torch::tensor(...)` binds a
	// non-const lvalue reference to a temporary — accepted only as an MSVC
	// extension; standard C++ requires a value or const reference here.
	template < typename initial_vector>
	inline void toTorchTensors(initial_vector& vec, std::vector<torch::Tensor>& tensor_vect)
	{
		tensor_vect.resize(vec.size());
#pragma omp parallel for
		for (int i = 0; i < vec.size(); i++)
		{
			//torch::Tensor& tensor = torch::tensor({ vec[i] }); 1.3
			torch::Tensor& tensor = torch::tensor(vec[i]);
			tensor_vect[i] = tensor;
		}
	}
	// Single-value overload: wraps vec in one tensor.
	template < typename initial_vector>
	inline torch::Tensor toTorchTensors(initial_vector& vec)
	{
		//return torch::tensor({ vec }); 1.3
		return torch::tensor( vec );
	}

	// Converts a batched tensor (batch x channel x h x w elements) into a
	// vector of tiny_dnn tensors, one flattened vec_t per batch entry.
	// NOTE(review): element-by-element item<float_t>() is very slow compared
	// to the pointer path used by the overloads below; also binds a non-const
	// reference to the temporary returned by view() (MSVC extension).
	inline std::vector<tiny_dnn::tensor_t> toTensor_t(torch::Tensor& x, int batch, int channel, int h, int w)
	{
		std::vector<tiny_dnn::tensor_t> y;
		const int size = channel * h*w;
		torch::Tensor& xx = x.view({ batch, 1,1, size });
		y.resize(batch);
#pragma omp parallel for
		for (int i = 0; i < batch; i++)
		{
			tiny_dnn::tensor_t t;
			tiny_dnn::vec_t v(size);
			for (int j = 0; j < size; j++)
			{
				v[j] = xx[i][0][0][j].template item<float_t>();
			}
			t.push_back(v);
			y[i] = t;
		}
		return y;
	}
	// Single-image variant: flattens a channel x h x w tensor into one
	// tiny_dnn tensor via a raw float pointer copy.
	// NOTE(review): data<float>() is deprecated in newer libtorch
	// (data_ptr<float>()); assumes x is a contiguous float tensor — confirm.
	inline tiny_dnn::tensor_t toTensor_t(torch::Tensor& x, int channel, int h, int w)
	{
		const int size = channel * h*w;
		torch::Tensor& xx = x.view({ 1, 1,1, size });
		tiny_dnn::tensor_t t;
		tiny_dnn::vec_t v(size);
#if 0
#pragma omp parallel for
		for (int j = 0; j < size; j++)
		{
			v[j] = xx[0][0][0][j].template item<float_t>();
		}
#else
		const float* p = xx.cpu().data<float>();
		v.assign(p, p + size);
#endif
		t.push_back(v);
		return t;
	}
	// Flat variant: copies the first `size` elements into a vec_t.
	inline tiny_dnn::vec_t toTensor_t(torch::Tensor& x, int size)
	{
		torch::Tensor& xx = x.view({ 1, 1,1, size });
		tiny_dnn::vec_t v(size);
#if 0
#pragma omp parallel for
		for (int j = 0; j < size; j++)
		{
			v[j] = xx[0][0][0][j].template item<float_t>();
		}
#else
		const float* p = xx.cpu().data<float_t>();
		v.assign(p, p + size);
#endif
		return v;
	}

	// Concatenates the tensors at `index` into one batch tensor along dim 0.
	// Returns the number of whole batches available; throws via
	// error_exception when images.size() < batchSize.
	inline int get_BATCH(const std::vector<torch::Tensor>& images, torch::Tensor& batch_images, const int batchSize, std::vector<int>& index)
	{
		int batchNum = images.size() / batchSize;
		if (batchNum == 0)
		{
			printf("input size < batch size.\n"); fflush(stdout);
			throw error_exception("input size < batch size");
		}
		batch_images = images[index[0]];
		for (int i = 1; i < index.size(); i++)
		{
			batch_images = torch::cat({ batch_images, images[index[i]] }, 0);
		}
		return batchNum;
	}

	// Scales a CxHxW tensor and writes it as an image file (requires
	// USE_IMAGE_UTIL; otherwise throws).
	// NOTE(review): the default parameter `const int scale = 1.0` is an int —
	// any fractional scale passed by a caller is truncated (0.5 -> 0);
	// consider float. Also binds non-const references to temporaries
	// (view() result and toTensor_t return) — MSVC extension only.
	void TensorToImageFile(torch::Tensor image_tensor, const std::string& filename, const int scale = 1.0)
	{
#ifdef USE_IMAGE_UTIL
		const int channels = image_tensor.sizes()[0];
		const int h = image_tensor.sizes()[1];
		const int w = image_tensor.sizes()[2];
		if (channels == 0 || channels > 3)
		{
			dump_dim("image_tensor", image_tensor);
			throw error_exception("tensor dimension != CxHxW");
		}
		tiny_dnn::tensor_t& img = toTensor_t(image_tensor.view({ channels, h,w }), channels, h, w);

		const int sz = img[0].size();
#pragma omp parallel for
		for (int i = 0; i < sz; i++)
		{
			img[0][i] *= scale;
		}
		Image rgb_img = vec_t2image(img[0], channels, h, w);
		ImageWrite(filename.c_str(), &rgb_img);
#else
		throw error_exception("undefined USE_IMAGE_UTIL");
#endif
	}

	// Paired variant of get_BATCH: builds matching image and label batches
	// from the same index list. Same batch-count semantics as above.
	inline int get_BATCH(const std::vector<torch::Tensor>& images, const std::vector<torch::Tensor>& labels, torch::Tensor& batch_images, torch::Tensor& batch_labels, const int batchSize, std::vector<int>& index)
	{
		int batchNum = images.size() / batchSize;
		if (batchNum == 0)
		{
			printf("input size < batch size.\n"); fflush(stdout);
			throw error_exception("input size < batch size");
		}
		batch_images = images[index[0]];
		batch_labels = labels[index[0]];
		for (int i = 1; i < index.size(); i++)
		{
			batch_images = torch::cat({ batch_images, images[index[i]] }, 0);
			batch_labels = torch::cat({ batch_labels, labels[index[i]] }, 0);
		}
		return batchNum;
	}

	// Multiplies the learning rate of param group 0 by alp for the named
	// optimizer type; no-op once lr drops below 1e-5. Only "adam" prints
	// the before/after value.
	inline void optimizer_lr_chg(std::string& optimizer_name, torch::optim::Optimizer* optimizer, float alp)
	{
		if (optimizer_name == "adam")
		{
			auto& opt = static_cast<torch::optim::AdamOptions&>(optimizer->param_groups()[0].options());
			if (opt.lr() < 1.0e-5) return;
			printf("\nlr:%.10f ->", opt.lr());
			opt.lr(opt.lr()*alp);
			printf(" %.10f\n", opt.lr());
		}
		if (optimizer_name == "sgd")
		{
			auto& opt = static_cast<torch::optim::SGDOptions&>(optimizer->param_groups()[0].options());
			if (opt.lr() < 1.0e-5) return;
			opt.lr(opt.lr()*alp);
		}
		if (optimizer_name == "rmsprop")
		{
			auto& opt = static_cast<torch::optim::RMSpropOptions&>(optimizer->param_groups()[0].options());
			if (opt.lr() < 1.0e-5) return;
			opt.lr(opt.lr()*alp);
		}
		if (optimizer_name == "adagrad")
		{
			auto& opt = static_cast<torch::optim::AdagradOptions&>(optimizer->param_groups()[0].options());
			if (opt.lr() < 1.0e-5) return;
			opt.lr(opt.lr()*alp);
		}
	}
/**
 * Thin training/inference wrapper that drives a libtorch Model with a
 * tiny_dnn-flavoured API (fit/train/test/predict, tensor_t conversions).
 */
template < typename Model>
class network_torch
{
    std::vector<float_t> Tolerance_Set;   // tolerances used by test_tolerance()
    float loss_value = 0.0;               // most recent per-batch / per-epoch loss
    float clip_grad_value = 0.0;          // 0 => gradient clipping disabled
    bool early_stopping = false;          // stop when loss stops improving
    int patience = 100;                   // epochs before early-stop statistics kick in
public:
    // Input/output geometry (set via input_dim/output_dim before fit/predict).
    int in_channels = 1;
    int in_H = 1;
    int in_W = 1;
    int out_channels = 1;
    int out_H = 1;
    int out_W = 1;
    tiny_dnn::timer time_measurement;     // wall-clock timing of fit()
    torch::Device device;                 // target device (kCPU / kCUDA)
    Model model;                          // wrapped libtorch module holder
    std::string optimizer_name;           // used by optimizer_lr_chg() dispatch

    /**
     * @param model_  model of neural networks
     * @param device_ Device Type (kCPU, kCUDA)
     * Moves the model to the device; exits the process on failure.
     */
    network_torch(Model& model_, torch::Device device_)
        :model(model_), device(device_)
    {
        try
        {
            model.get()->to(device);
        }
        catch (std::exception& e)
        {
            printf("%s\n", e.what());
            exit(0);
        }
    }
    inline void set_early_stopping(bool flag, int patience_=100) { early_stopping = flag; patience = patience_; }
    inline bool get_early_stopping() { return early_stopping; }
    inline void set_clip_grad_norm(float v) { clip_grad_value = v; }
    inline float get_clip_grad_norm() { return clip_grad_value; }
    // Note the (c, w, h) parameter order while members are stored as H/W.
    inline void input_dim(int c, int w, int h) { in_channels = c; in_H = h; in_W = w; }
    inline void output_dim(int c, int w, int h) { out_channels = c; out_H = h; out_W = w; }

    bool classification = false;   // true => nll_loss/argmax, false => mse_loss
    bool batch_shuffle = true;     // shuffle mini-batch composition/order
    bool pre_make_batch = true;    // build all batches once before the epoch loop
    bool stop_training_ = false;   // set by stop_ongoing_training()

    /**
     * request to finish an ongoing training
     *
     * It is safe to test the current network performance in
     * @a on_batch_enumerate and @a on_epoch_enumerate callbacks during training.
     */
    inline void stop_ongoing_training() { stop_training_ = true; }

    /**
     * Build randomized (or sequential) mini-batches from `images` only.
     * @param images  array of input data
     * @param batch_x input data batch (output)
     * NOTE(review): uses a file-scope kTrainBatchSize (not a member) —
     * presumably a global batch-size constant; confirm against the header.
     */
    inline void generate_BATCH(
        std::vector<torch::Tensor> &images,
        std::vector< torch::Tensor>& batch_x
    )
    {
        bool shuffle = batch_shuffle;
        const int batchNum = (int64_t)((float)images.size() / (float)kTrainBatchSize + 0.5);
        batch_x = std::vector< torch::Tensor>(batchNum);
        std::random_device rnd;
        std::mt19937 mt(rnd());
        std::uniform_int_distribution<> rand_index(0, (int)images.size() - 1);
#pragma omp parallel for
        for (int batch_idx = 0; batch_idx < batchNum; batch_idx++)
        {
            std::vector<int> index(kTrainBatchSize);
            if (shuffle)
            {
                // random sampling with replacement
                for (int k = 0; k < kTrainBatchSize; k++)
                {
                    index[k] = rand_index(mt);
                }
            }
            else
            {
                // sequential indices, wrapping around the dataset
                for (int k = 0; k < kTrainBatchSize; k++)
                {
                    index[k] = (batch_idx*kTrainBatchSize + k) % images.size();
                }
            }
            get_BATCH(images, batch_x[batch_idx], kTrainBatchSize, index);
            if (tensor_flatten_size(batch_x[batch_idx]) < kTrainBatchSize*in_channels*in_H*in_W)
            {
                dump_dim("batch_x", batch_x[batch_idx]);
                std::cout << tensor_flatten_size(batch_x[batch_idx]) << " < " << kTrainBatchSize << "*" << in_channels << "*" << in_H << "* " << in_W << "=" << kTrainBatchSize*in_channels*in_H*in_W << std::endl;
                printf("tensor size error.\n"); fflush(stdout);
                throw error_exception("tensor size error.");
            }
            batch_x[batch_idx] = batch_x[batch_idx].view({ kTrainBatchSize, in_channels, in_H, in_W });
        }
    }

    /**
     * Build randomized (or sequential) mini-batches of (input, label) pairs.
     * @param images  array of input data
     * @param labels  array of labels output
     * @param batch_x input data batch (output)
     * @param batch_y output data batch (output)
     */
    inline void generate_BATCH(
        std::vector<torch::Tensor> &images,
        std::vector<torch::Tensor> &labels,
        std::vector< torch::Tensor>& batch_x,
        std::vector< torch::Tensor>& batch_y
    )
    {
        bool shuffle = batch_shuffle;
        int batch_tmp = kTrainBatchSize;
        if (batch_tmp > images.size())
        {
            batch_tmp = images.size();
        }
        //printf("%d -> lost:%d\n", images.size(), images.size() % kTrainBatchSize);
        //for (int i = batch_tmp; i >= 2; i--)
        //{
        //	if (images.size() % i == 0)
        //	{
        //		batch_tmp = i;
        //		break;
        //	}
        //}
        //printf("Please change:kTrainBatchSize:%d -> %d\n", kTrainBatchSize, batch_tmp);
        const int batchNum = (int64_t)((float)images.size() / (float)kTrainBatchSize + 0.5);
        batch_x = std::vector< torch::Tensor>(batchNum);
        batch_y = std::vector< torch::Tensor>(batchNum);
        std::random_device rnd;
        std::mt19937 mt(rnd());
        std::uniform_int_distribution<> rand_index(0, (int)images.size() - 1);
#pragma omp parallel for
        for (int batch_idx = 0; batch_idx < batchNum; batch_idx++)
        {
            std::vector<int> index(kTrainBatchSize);
            if (shuffle)
            {
                for (int k = 0; k < kTrainBatchSize; k++)
                {
                    index[k] = rand_index(mt);
                }
            }
            else
            {
                for (int k = 0; k < kTrainBatchSize; k++)
                {
                    index[k] = (batch_idx*kTrainBatchSize + k) % images.size();
                }
            }
            get_BATCH(images, labels, batch_x[batch_idx], batch_y[batch_idx], kTrainBatchSize, index);
            if (tensor_flatten_size(batch_x[batch_idx]) < kTrainBatchSize*in_channels*in_H*in_W)
            {
                dump_dim("batch_x", batch_x[batch_idx]);
                std::cout << tensor_flatten_size(batch_x[batch_idx]) << " < " << kTrainBatchSize << "*" << in_channels << "*" << in_H << "* " << in_W << "=" <<kTrainBatchSize*in_channels*in_H*in_W << std::endl;
                printf("tensor size error.\n"); fflush(stdout);
                throw error_exception("tensor size error.");
            }
            if (tensor_flatten_size(batch_y[batch_idx]) < kTrainBatchSize*out_channels*out_H*out_W)
            {
                dump_dim("batch_y", batch_y[batch_idx]);
                std::cout << tensor_flatten_size(batch_y[batch_idx]) << " < " << kTrainBatchSize << "*" << out_channels << "*" << out_H << "* " << out_W << "=" << kTrainBatchSize*out_channels*out_H*out_W << std::endl;
                printf("tensor size error.\n"); fflush(stdout);
                throw error_exception("tensor size error.");
            }
            batch_x[batch_idx] = batch_x[batch_idx].view({ kTrainBatchSize, in_channels, in_H, in_W });
            batch_y[batch_idx] = batch_y[batch_idx].view({ kTrainBatchSize, out_channels, out_H, out_W });
        }
    }

    /**
     * @param optimizer          optimizing algorithm for training
     * @param images             array of input data
     * @param labels             array of labels output
     * @param kTrainBatchSize    number of mini-batch
     * @param kNumberOfEpochs    number of training epochs
     * @param on_batch_enumerate callback for each mini-batch enumerate
     * @param on_epoch_enumerate callback for each epoch
     * NOTE(review): when pre_make_batch == false, `batchNum` is read
     * uninitialized by the batch_idx_list loop below — confirm intended use.
     */
    bool fit(
        torch::optim::Optimizer* optimizer,
        std::vector<torch::Tensor> &images,
        std::vector<torch::Tensor> &labels,
        int kTrainBatchSize,
        int kNumberOfEpochs,
        std::function <void(void)> on_batch_enumerate = {},
        std::function <void(void)> on_epoch_enumerate = {}
    )
    {
        if (images.size() != labels.size())
        {
            return false;
        }
        if (images.size() < kTrainBatchSize || labels.size() < kTrainBatchSize)
        {
            return false;
        }
        time_measurement.start();

        int batchNum;
        std::vector< torch::Tensor> batch_x;
        std::vector< torch::Tensor> batch_y;
        if (pre_make_batch)
        {
            // Build every batch up-front and move them to the device once.
            generate_BATCH(images, labels, batch_x, batch_y);
            batchNum = batch_x.size();
            for (int i = 0; i < batchNum; i++)
            {
                batch_x[i] = batch_x[i].to(device);
                batch_y[i] = batch_y[i].to(device);
            }
        }
        std::vector<int> batch_idx_list;
        for (int i = 0; i < batchNum; i++)
        {
            batch_idx_list.push_back(i);
        }
        std::mt19937 get_rand_mt;
        optimizer->zero_grad();
        stop_training_ = false;
        model.get()->train(true);
        int early_stopping_count = 0;
        std::vector<float> loss_values;
        for (size_t epoch = 0; epoch < kNumberOfEpochs && !stop_training_; ++epoch)
        {
            if (!pre_make_batch)
            {
                // Regenerate (re-shuffled) batches every epoch.
                generate_BATCH(images, labels, batch_x, batch_y);
                batchNum = batch_x.size();
                for (int i = 0; i < batchNum; i++)
                {
                    batch_x[i] = batch_x[i].to(device);
                    batch_y[i] = batch_y[i].to(device);
                }
            }
            if (this->batch_shuffle)
            {
                std::shuffle(batch_idx_list.begin(), batch_idx_list.end(), get_rand_mt);
            }
            loss_value = 0.0;
            float loss_ave = 0.0;
            for (int b_idx = 0; b_idx < batchNum && !stop_training_; b_idx++)
            {
                const int batch_idx = batch_idx_list[b_idx];
                torch::Tensor& data = batch_x[batch_idx];
                torch::Tensor& targets = batch_y[batch_idx];
                //data = data.to(device);
                //targets = targets.to(device);
                optimizer->zero_grad();
                auto output = model.get()->forward(data);
                //dump_dim("output", output);
                //dump_dim("targets", targets);
                targets = targets.reshape_as(output);
                torch::Tensor loss;
                if (classification)
                {
                    // expects log-probabilities from the model
                    loss = torch::nll_loss(output, targets.argmax(1));
                }
                else
                {
                    loss = torch::mse_loss(output, targets);
                }
                if (std::isnan(loss.template item<float_t>()))
                {
                    std::cout << "loss value is nan" << std::endl;
                }
                AT_ASSERT(!std::isnan(loss.template item<float_t>()));
                loss.backward();
                if ( fabs(clip_grad_value) > 0.0)
                {
                    torch::nn::utils::clip_grad_norm_(model->parameters(), clip_grad_value);
                }
                optimizer->step();
                loss_value = loss.template item<float_t>();
                loss_ave += loss_value;
                on_batch_enumerate();
                model.get()->train(true);   // callbacks may switch to eval mode
            }
            if (stop_training_) break;
            // NOTE(review): divides by batch SIZE, not batch COUNT — looks
            // like it should be loss_ave / batchNum; confirm before changing.
            loss_value = loss_ave / kTrainBatchSize;
#if 10
            // Early-stopping / LR-decay heuristic: after `patience` epochs,
            // compare the current loss to mean + 2*sigma2/sqrt(n) of the
            // recent loss history.
            if (patience < epoch )
            {
                if (loss_values.size() > 20)
                {
                    // statistics over the oldest 2/3 of the recorded losses
                    const int n = loss_values.size() - loss_values.size() / 3;
                    float loss_mean = loss_values[0];
                    for (int i = 1; i < n; i++)
                    {
                        loss_mean += loss_values[i];
                    }
                    loss_mean /= n;
                    float sigma2 = 0;
                    for (int i = 0; i < n; i++)
                    {
                        sigma2 += (loss_values[i] - loss_mean)*(loss_values[i] - loss_mean);
                    }
                    sigma2 /= n;
                    loss_values.clear();
                    //2.0 => 97.7%   1.96 => 95%
                    const float u = loss_mean + 2.0*sigma2 / sqrt(n);
                    const float d = loss_mean - 2.0*sigma2 / sqrt(n);
                    std::cout << "u " << u << std::endl;
                    std::cout << "d " << d << std::endl;
                    std::cout << "sigma2 " << sigma2 << std::endl;
                    std::cout << "loss_mean " << loss_mean << std::endl;
                    std::cout << "loss_value " << loss_value << std::endl;
                    if (loss_value > u)
                    {
                        // loss got significantly worse: count it, maybe stop,
                        // otherwise halve the learning rate
                        std::cout << "early_stopping_count " << early_stopping_count << std::endl;
                        early_stopping_count++;
                        if (get_early_stopping() && early_stopping_count > 3)
                        {
                            std::cout << "early_stopping" << std::endl;
                            break;
                        }
                        optimizer_lr_chg(optimizer_name, optimizer, 0.5);
                        //early_stopping_count = 0;
                        //getchar();
                        //auto &options = static_cast<torch::optim::OptimizerOptions &>(optimizer->param_groups()[0].options());
                        //options.lr(options.lr() * 0.1);
                    }
                    else
                    {
                        early_stopping_count = 0;
                    }
                }
                loss_values.push_back(loss_value);
            }
#endif
            on_epoch_enumerate();
            model.get()->train(true);   // callbacks may switch to eval mode
        }
        time_measurement.stop();
        return true;
    }

    /**
     * tiny_dnn-style overload of fit(): converts tensor_t data to torch
     * tensors and forwards to the torch-tensor fit().
     */
    bool fit(
        torch::optim::Optimizer* optimizer,
        tiny_dnn::tensor_t &images,
        tiny_dnn::tensor_t &labels,
        int kTrainBatchSize,
        int kNumberOfEpochs,
        std::function <void(void)> on_batch_enumerate = {},
        std::function <void(void)> on_epoch_enumerate = {}
    )
    {
        std::vector<torch::Tensor> images_torch;
        std::vector<torch::Tensor> labels_torch;
        toTorchTensors(images, images_torch);
        toTorchTensors(labels, labels_torch);
        return fit(optimizer, images_torch, labels_torch, kTrainBatchSize, kNumberOfEpochs, on_batch_enumerate, on_epoch_enumerate);
    }

    /**
     * Classification entry point: converts label ids to one-hot vectors and
     * forwards to fit().
     * @param class_labels array of label-id for each input data (0-origin)
     */
    bool train(
        torch::optim::Optimizer* optimizer,
        tiny_dnn::tensor_t &images,
        std::vector<tiny_dnn::label_t> &class_labels,
        int kTrainBatchSize,
        int kNumberOfEpochs,
        std::function <void(void)> on_batch_enumerate = {},
        std::function <void(void)> on_epoch_enumerate = {}
    )
    {
        std::vector<tiny_dnn::vec_t> one_hot_vec;
        label2vec(class_labels, one_hot_vec);
        std::vector<torch::Tensor> images_torch;
        std::vector<torch::Tensor> labels_torch;
        toTorchTensors(images, images_torch);
        toTorchTensors(one_hot_vec, labels_torch);
        return fit(optimizer, images_torch, labels_torch, kTrainBatchSize, kNumberOfEpochs, on_batch_enumerate, on_epoch_enumerate);
    }

    /**
     * Evaluate the model over `images`/`labels` in mini-batches, printing
     * average loss (and accuracy for classification).
     */
    bool test(
        std::vector<torch::Tensor> &images,
        std::vector<torch::Tensor> &labels,
        int kTestBatchSize
    )
    {
        if (images.size() != labels.size())
        {
            return false;
        }
        if (images.size() < kTestBatchSize || labels.size() < kTestBatchSize)
        {
            return false;
        }
        //torch::NoGradGuard no_grad;
        //model->eval();
        model.get()->train(false);
        float loss_ave = 0.0;
        int correct = 0;
        //int batch_tmp = kTestBatchSize;
        //if (batch_tmp > images.size())
        //{
        //	batch_tmp = images.size();
        //}
        //printf("%d -> lost:%d\n", images.size(), images.size() % kTestBatchSize);
        //for (int i = batch_tmp; i >= 2; i--)
        //{
        //	if (images.size() % i == 0)
        //	{
        //		batch_tmp = i;
        //		break;
        //	}
        //}
        //printf("Please change:kTestBatchSize:%d -> %d\n", kTestBatchSize, batch_tmp);
        int testNum = images.size() / kTestBatchSize;
        if (testNum == 0)
        {
            printf("input size < test batch size.\n"); fflush(stdout);
            throw error_exception("input size < test batch size");
        }
        for (size_t test = 0; test < testNum; ++test)
        {
            torch::Tensor batch_x;
            torch::Tensor batch_y;
            std::vector<int> index(kTestBatchSize);
            for (int k = 0; k < kTestBatchSize; k++)
            {
                index[k] = (kTestBatchSize * test + k) % images.size();
            }
            get_BATCH(images, labels, batch_x, batch_y, kTestBatchSize, index);
            // NOTE(review): binds non-const references to temporaries from
            // view() — compiles only as an MSVC extension; should be values.
            torch::Tensor& data = batch_x.view({ kTestBatchSize,in_channels, in_H, in_W });
            torch::Tensor& targets = batch_y.view({ kTestBatchSize, out_channels, out_H, out_W });
            data = data.to(device);
            targets = targets.to(device);
            torch::Tensor output = model.get()->forward(data);
            targets = targets.reshape_as(output);
            torch::Tensor loss;
            if (classification)
            {
                loss = torch::nll_loss(output, targets.argmax(1));
            }
            else
            {
                loss = torch::mse_loss(output, targets);
            }
            AT_ASSERT(!std::isnan(loss.template item<float_t>()));
            loss_value = loss.template item<float_t>();
            loss_ave += loss_value;
            if (classification)
            {
                auto pred = output.argmax(1);
                correct += pred.eq(targets.argmax(1)).sum().template item<int64_t>();
            }
            // (removed: large commented-out per-element accuracy variant that
            //  duplicated the argmax comparison above)
        }
        if (classification)
        {
            std::printf(" Accuracy: %.3f%% Loss: %.3f\n", 100.0*static_cast<float_t>(correct) / images.size(), loss_ave / testNum);
        }
        else
        {
            std::printf("Loss: %.3f\n", loss_ave / testNum);
        }
        return true;
    }

    /**
     * @param images array of input data
     * @param labels array of output data
     * @param kTestBatchSize number of mini batch
     */
    bool test(
        tiny_dnn::tensor_t &images,
        tiny_dnn::tensor_t &labels,
        int kTestBatchSize
    )
    {
        std::vector<torch::Tensor> images_torch;
        std::vector<torch::Tensor> labels_torch;
        toTorchTensors(images, images_torch);
        toTorchTensors(labels, labels_torch);
        return test(images_torch, labels_torch, kTestBatchSize);
    }

    /**
     * @param images array of input data
     * @param class_labels array of label ids (converted to one-hot vectors)
     * @param kTestBatchSize number of mini batch
     */
    bool test(
        tiny_dnn::tensor_t &images,
        std::vector<tiny_dnn::label_t> &class_labels,
        int kTestBatchSize
    )
    {
        std::vector<tiny_dnn::vec_t> one_hot_vec;
        label2vec(class_labels, one_hot_vec);
        std::vector<torch::Tensor> images_torch;
        std::vector<torch::Tensor> labels_torch;
        toTorchTensors(images, images_torch);
        toTorchTensors(one_hot_vec, labels_torch);
        return test(images_torch, labels_torch, kTestBatchSize);
    }

    // Forward pass with gradients disabled (NoGradGuard is active here,
    // unlike predict()).
    torch::Tensor fprop(torch::Tensor &in)
    {
        torch::NoGradGuard no_grad;
        model->eval();
        model.get()->train(false);
        return model.get()->forward(in);
    }

    /**
     * executes forward-propagation and returns output
     **/
    inline torch::Tensor predict(torch::Tensor& X)
    {
        //torch::NoGradGuard no_grad;
        //model->eval();
        model.get()->train(false);
        torch::Tensor y = model.get()->forward(X.to(device));
        return y;
    }

    /**
     * executes forward-propagation and returns output converted to tensor_t
     **/
    inline std::vector<tiny_dnn::tensor_t> predict(torch::Tensor& X, const int batch)
    {
        //torch::NoGradGuard no_grad;
        //model->eval();
        model.get()->train(false);
        torch::Tensor y = model.get()->forward(X.to(device));
        std::vector<tiny_dnn::tensor_t> t;
        toTensor_t(y, t, batch, out_channels, out_H, out_W);
        return t;
    }

    /**
     * Batched forward-propagation over a vector of tiny_dnn inputs.
     * Falls back to per-sample prediction for batch == 1 or undersized input;
     * any remainder (X.size() % batch) is also predicted per sample.
     **/
    inline std::vector<tiny_dnn::vec_t> predict(std::vector<tiny_dnn::vec_t>& X, int batch = 1)
    {
        //printf("X.size()=%d\n", X.size()); fflush(stdout);
        std::vector<tiny_dnn::vec_t> out;
        int batch_n = X.size() / batch;
        if (X.size() < batch || batch == 1)
        {
            for (int i = 0; i < X.size(); i++)
            {
                out.emplace_back(predict(X[i]));
            }
            return out;
        }
        //torch::NoGradGuard no_grad;
        //model->eval();
        model.get()->train(false);
        std::vector<torch::Tensor> n_batch_images(batch_n);
#pragma omp parallel for
        for (int i = 0; i < batch_n; i++)
        {
            torch::Tensor images_torch = toTorchTensors(X[i*batch]).view({ 1, in_channels, in_H, in_W }).to(device);
            auto batch_images = images_torch;
            for (int j = 1; j < batch; j++)
            {
                torch::Tensor images_torch = toTorchTensors(X[i*batch + j]).view({ 1, in_channels, in_H, in_W }).to(device);
                batch_images = torch::cat({ batch_images, images_torch }, 0);
            }
            n_batch_images[i] = batch_images;
        }
        for (int k = 0; k < batch_n; k++)
        {
            //cpp_torch::dump_dim("batch_images", batch_images);
            torch::Tensor y = model.get()->forward(n_batch_images[k]);
            y = y.view({ batch, out_channels, out_H, out_W });
            for (int i = 0; i < batch; i++)
            {
                //cpp_torch::dump_dim("torch::Tensor y", y);
                //std::cout << " " << out_data_size() << std::endl;
                tiny_dnn::vec_t& t = toTensor_t(y[i], out_data_size());
                out.emplace_back(t);
            }
        }
        int n = X.size() % batch;
        //printf("n=%d\n", n); fflush(stdout);
        if (n > 0 && X.size() > batch)
        {
            for (int i = X.size() - n; i < X.size(); i++)
            {
                out.emplace_back(predict(X[i]));
            }
        }
        //printf("out.size()=%d\n", out.size()); fflush(stdout);
        return out;
    }

    /**
     * executes forward-propagation and returns output
     **/
    inline tiny_dnn::vec_t predict(tiny_dnn::vec_t& X)
    {
        //torch::NoGradGuard no_grad;
        //model->eval();
        model.get()->train(false);
        // NOTE(review): non-const reference bound to a temporary (MSVC extension).
        torch::Tensor& images_torch = toTorchTensors(X).view({ 1, in_channels, in_H, in_W }).to(device);
        torch::Tensor y = model.get()->forward(images_torch);
        //cpp_torch::dump_dim("torch::Tensor y", y);
        //std::cout << " " << out_data_size() << std::endl;
        tiny_dnn::vec_t& t = toTensor_t(y, out_data_size());
        return t;
    }

    /**
     * executes forward-propagation and returns the argmax label
     **/
    inline tiny_dnn::label_t predict_label(tiny_dnn::vec_t& X)
    {
        //torch::NoGradGuard no_grad;
        //model->eval();
        model.get()->train(false);
        torch::Tensor images_torch = toTorchTensors(X).view({ 1, in_channels, in_H, in_W }).to(device);
        torch::Tensor y = model.get()->forward(images_torch);
        tiny_dnn::vec_t t = toTensor_t(y, out_data_size());
        return vec_max_index(t);
    }

    // Convert label ids to one-hot vectors of size out_data_size().
    inline void label2vec(const std::vector<tiny_dnn::label_t>& labels, std::vector<tiny_dnn::vec_t>& vec)
    {
        const size_t outdim = out_data_size();
        vec.clear();
        vec.resize(labels.size());
        const size_t sz = labels.size();
#pragma omp parallel for
        for (int i = 0; i < sz; i++)
        {
            tiny_dnn::vec_t t(outdim, 0);
            t[labels[i]] = 1.0;
            vec[i] = t;
        }
    }
    // Single-label one-hot conversion.
    inline void label2vec(const tiny_dnn::label_t& labels, tiny_dnn::vec_t& vec)
    {
        const size_t outdim = out_data_size();
        tiny_dnn::vec_t t(outdim, 0);
        t[labels] = 1.0;
        vec = t;
    }

    // argmax helpers over the three output representations
    inline tiny_dnn::label_t vec_max_index(torch::Tensor &out)
    {
        return tiny_dnn::label_t(out.view({ out_data_size() }).argmax(0).template item<float_t>());
    }
    inline tiny_dnn::label_t vec_max_index(tiny_dnn::vec_t &out)
    {
        return tiny_dnn::label_t(max_index(out));
    }
    inline tiny_dnn::label_t vec_max_index(tiny_dnn::tensor_t &out)
    {
        return tiny_dnn::label_t(max_index(out[0]));
    }

    // Loss over labelled classification data (labels -> one-hot first).
    float_t get_loss(std::vector<tiny_dnn::vec_t> &in, std::vector<tiny_dnn::label_t> &t, int batchSize)
    {
        std::vector<tiny_dnn::vec_t> vec;
        label2vec(t, vec);
        return get_loss(in, vec, batchSize);
    }

    /**
     * Sum of per-batch losses over the whole dataset.
     * NOTE(review): the final division is by BatchSize, not batchNum —
     * presumably intended to be an average over batches; confirm.
     */
    float_t get_loss(
        std::vector<tiny_dnn::vec_t> &in,
        std::vector<tiny_dnn::vec_t> &t,
        int BatchSize)
    {
        float_t sum_loss = float_t(0);
        std::vector<torch::Tensor> images;
        std::vector<torch::Tensor> labels;
        //printf("in:%d\n", in.size());
        toTorchTensors(in, images);
        toTorchTensors(t, labels);
        //int batch_tmp = BatchSize;
        //if (batch_tmp > in.size())
        //{
        //	batch_tmp = in.size();
        //}
        //printf("lost:%d\n", in.size() % BatchSize);
        //for (int i = batch_tmp; i >= 2; i--)
        //{
        //	if (images.size() % i == 0)
        //	{
        //		batch_tmp = i;
        //		break;
        //	}
        //}
        //printf("change:BatchSize:%d -> %d\n", BatchSize, batch_tmp);
        //BatchSize = batch_tmp;
        const int batchNum = (const int)((float)in.size() / BatchSize +0.5);
        if (batchNum == 0)
        {
            printf("input size:%d BatchSize:%d\n", in.size(), BatchSize);
            throw error_exception("input size < Batch Size");
        }
        std::vector< torch::Tensor> batch_x(batchNum);
        std::vector< torch::Tensor> batch_y(batchNum);
        //torch::NoGradGuard no_grad;
        //model->eval();
        model.get()->train(false);
        std::vector<float_t> loss_list(in.size(), 0.0);
        //#pragma omp parallel for
        for (int i = 0; i < batchNum; i++)
        {
            //if (!pre_make_batch)
            {
                std::vector<int> index(BatchSize);
                for (int k = 0; k < BatchSize; k++)
                {
                    index[k] = (i* BatchSize + k) % images.size();
                }
                get_BATCH(images, labels, batch_x[i], batch_y[i], BatchSize, index);
                batch_x[i] = batch_x[i].view({ BatchSize, in_channels, in_H, in_W });
                batch_y[i] = batch_y[i].view({ BatchSize, out_channels, out_H, out_W });
            }
            // NOTE(review): non-const references bound to temporaries from
            // to(device) — MSVC extension; should be values.
            torch::Tensor& input = batch_x[i].to(device);
            torch::Tensor& targets = batch_y[i].to(device);
            torch::Tensor predicted = predict(input).to(device);
            //dump_dim(std::string("predicted"), predicted);
            //dump_dim(std::string("targets"), targets);
            torch::Tensor loss;
            if (classification)
            {
                loss = torch::nll_loss(predicted, targets.view_as(predicted).argmax(1));
            }
            else
            {
                loss = torch::mse_loss(predicted.view_as(targets), targets);
            }
            AT_ASSERT(!std::isnan(loss.template item<float_t>()));
            //dump_dim(std::string("loss"), loss);
            //std::cout << loss << std::endl;
            loss_list[i] = loss.template item<float_t>();
        }
        for (size_t i = 0; i < batchNum; i++)
        {
            sum_loss += loss_list[i];
        }
        return sum_loss/ BatchSize;
    }

    // Fill Tolerance_Set with `div` values linearly spaced from max_tol
    // down to min_tol (minimum of 3 steps).
    void set_tolerance(const float max_tol, const float min_tol, int div = 5)
    {
        if (div < 3) div = 3;
        Tolerance_Set.resize(div);
        for (int i = 0; i < div; i++)
        {
            Tolerance_Set[i] = (max_tol + i*(min_tol - max_tol) / (div - 1.0));
        }
    }
    std::vector<float_t>& get_tolerance()
    {
        return Tolerance_Set;
    }

    /*
     * output vector: output[0..tolerance_set.size()-1] = num_success per
     * tolerance, output[tolerance_set.size()] = number of images.
     * Success = mean squared error of a prediction below the tolerance.
     */
    std::vector<int> get_accuracy(tiny_dnn::tensor_t& images, tiny_dnn::tensor_t& labels, std::vector<float_t>& tolerance_set)
    {
        std::vector<int> result(tolerance_set.size()+1);
        if (images.size() == 0)
        {
            return result;
        }
        result[tolerance_set.size()] = images.size();
        for (int i = 0; i < images.size(); i++)
        {
            tiny_dnn::vec_t& predict_y = predict(images[i]);
            const tiny_dnn::vec_t& actual = labels[i];
            AT_ASSERT(predict_y.size() == actual.size());
            float sum = 0.0;
            for (int k = 0; k < predict_y.size(); k++)
            {
                sum += (predict_y[k] - actual[k])*(predict_y[k] - actual[k]);
            }
            sum /= predict_y.size();
            for (int j = 0; j < tolerance_set.size(); j++)
            {
                if (sum < tolerance_set[j])
                {
                    result[j]++;
                }
            }
        }
        return result;
    }

    // Classification accuracy + confusion matrix over one-hot labels.
    tiny_dnn::result get_accuracy(
        tiny_dnn::tensor_t& images,
        tiny_dnn::tensor_t& labels, int batch = 1)
    {
        tiny_dnn::result result;
        if (images.size() == 0)
        {
            result.num_total = 1;
            return result;
        }
        const size_t sz = images.size();
#if 10
        std::vector< tiny_dnn::label_t> predicted_list(sz, 0);
        std::vector< tiny_dnn::label_t>actual_list(sz, 0);
        // batched prediction, then a serial pass to build the confusion matrix
        std::vector<tiny_dnn::vec_t>& n_predict_y = predict(images, batch);
#pragma omp parallel for
        for (int i = 0; i < sz; i++)
        {
            //tiny_dnn::vec_t& predict_y = predict(images[i]);
            tiny_dnn::vec_t& predict_y = n_predict_y[i];
            predicted_list[i] = vec_max_index(predict_y);
            actual_list[i] = vec_max_index(labels[i]);
        }
        for (int i = 0; i < sz; i++)
        {
            if (predicted_list[i] == actual_list[i]) result.num_success++;
            result.num_total++;
            result.confusion_matrix[predicted_list[i]][actual_list[i]]++;
        }
#else
        for (int i = 0; i < sz; i++)
        {
            tiny_dnn::vec_t& predict_y = predict(images[i]);
            const tiny_dnn::label_t predicted = vec_max_index(predict_y);
            const tiny_dnn::label_t actual = vec_max_index(labels[i]);
            if (predicted == actual) result.num_success++;
            result.num_total++;
            result.confusion_matrix[predicted][actual]++;
        }
#endif
        return result;
    }

    std::vector<int> test_tolerance(tiny_dnn::tensor_t& images, tiny_dnn::tensor_t& labels)
    {
        AT_ASSERT(Tolerance_Set.size() != 0);
        return get_accuracy(images, labels, Tolerance_Set);
    }

    // labels[#] = 0,1,..class-1
    tiny_dnn::result get_accuracy(tiny_dnn::tensor_t& images, std::vector <tiny_dnn::label_t>& labels)
    {
        std::vector<tiny_dnn::vec_t> vec;
        label2vec(labels, vec);	//on-hot-vector
        return get_accuracy(images, vec);
    }

    tiny_dnn::result test(tiny_dnn::tensor_t& images, tiny_dnn::tensor_t& labels)
    {
        return get_accuracy(images, labels);
    }
    // labels[#] = on-hot-vector
    tiny_dnn::result test(tiny_dnn::tensor_t& images, std::vector <tiny_dnn::label_t>& labels)
    {
        return get_accuracy(images, labels);
    }

    inline int in_data_size() const { return in_channels * in_H*in_W; }
    inline int out_data_size() const { return out_channels * out_H*out_W; }

    inline void save(std::string& filename)
    {
        torch::save(model, filename);
    }
    inline void load(std::string& filename)
    {
        try
        {
            //CUDA information is also included in the data that has been
            //learned and serialized by CUDA. map_location = device
            torch::load(model, filename/*, device*/);
            model.get()->to(device);
        }
        catch (c10::Error& err)
        {
            printf("load error[%s]\n", err.what());
        }
    }
};

// Print a tiny_dnn classification result (detail + summary).
void print_ConfusionMatrix(tiny_dnn::result& res)
{
    //ConfusionMatrix
    std::cout << "ConfusionMatrix:" << std::endl;
    res.print_detail(std::cout);
    std::cout << res.num_success << "/" << res.num_total << std::endl;
    res.print_summary(std::cout);
    //printf("accuracy:%.3f%%\n", res.accuracy());
}

// Print per-tolerance accuracy from test_tolerance()'s result vector
// (res.back() is the total image count).
void print_ConfusionMatrix(std::vector<int>& res, std::vector<float_t>& tol)
{
    //ConfusionMatrix
    std::cout << "ConfusionMatrix:" << std::endl;
    for (int i = 0; i < res.size()-1; i++)
    {
        printf("tolerance:%.4f %d / %d accuracy:%.3f%%\n", tol[i], res[i], res.back(), 100.0*(float_t)res[i] / (float_t)res.back());
    }
}
}
#endif
GB_unaryop__ainv_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_uint8_uint8
// op(A') function: GB_tran__ainv_uint8_uint8

// C type:   uint8_t
// A type:   uint8_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = -aij   (additive inverse; unsigned negation wraps mod 256,
//                         which is well-defined in C)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint8_uint8
(
    uint8_t *restrict Cx,         // output array, anz entries
    const uint8_t *restrict Ax,   // input array, anz entries
    int64_t anz,                  // number of entries
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise, embarrassingly parallel: Cx [p] = -Ax [p]
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // Rowcounts [naslice]
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated by the included template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
RCCE_lib.h
//
// Copyright 2010 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Internal (library-private) declarations for the RCCE communication library.

#ifndef RCCE_LIB_H
#define RCCE_LIB_H
#include "RCCE.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>

/* PAD32byte is used to compute a cacheline padded length of n (input) bytes */
#define PAD32byte(n) ((n)%32==0 ? (n) : (n) + 32 - (n)%32)

//#define BITSPERCHAR                     8

// locations of the source/target buffers of a transfer
#define BOTH_IN_COMM_BUFFER             12
#define SOURCE_IN_PRIVATE_MEMORY        34
#define TARGET_IN_PRIVATE_MEMORY        56

// combined (op, type) codes for the reduction operations:
// code = op + RCCE_NUM_OPS * type
#define RCCE_SUM_INT       (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_SUM_LONG      (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_SUM_FLOAT     (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_SUM_DOUBLE    (RCCE_SUM+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_MAX_INT       (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_MAX_LONG      (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_MAX_FLOAT     (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_MAX_DOUBLE    (RCCE_MAX+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_MIN_INT       (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_MIN_LONG      (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_MIN_FLOAT     (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_MIN_DOUBLE    (RCCE_MIN+(RCCE_NUM_OPS)*(RCCE_DOUBLE))
#define RCCE_PROD_INT      (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_INT))
#define RCCE_PROD_LONG     (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_LONG))
#define RCCE_PROD_FLOAT    (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_FLOAT))
#define RCCE_PROD_DOUBLE   (RCCE_PROD+(RCCE_NUM_OPS)*(RCCE_DOUBLE))

// sentinel values marking communicator initialization state
#define RCCE_COMM_INITIALIZED      45328976
#define RCCE_COMM_NOT_INITIALIZED -45328976

// auxiliary MPB pointer type
typedef volatile unsigned int*  t_vintp;
// Also need dereferenced types
typedef volatile unsigned char t_vchar;
typedef volatile unsigned int  t_vint;

// node of the circular linked list used by the MPB memory allocator
typedef struct rcce_block {
  t_vcharp space;          // pointer to space for data in block
  size_t free_size;        // actual free space in block (0 or whole block)
  struct rcce_block *next; // pointer to next block in circular linked list
} RCCE_BLOCK;

#ifdef SINGLEBITFLAGS
// one cacheline's worth of single-bit synchronization flags
typedef struct rcce_flag_line {
  char flag[RCCE_LINE_SIZE];
  t_vcharp line_address;
  int  members;
  struct rcce_flag_line *next;
} RCCE_FLAG_LINE;
#endif

// head of the allocator's block list
typedef struct {
  RCCE_BLOCK *tail;     // "last" block in linked list of blocks
} RCCE_BLOCK_S;

#ifndef GORY
// pre-allocated send/ready flags and buffer bookkeeping (simple interface)
extern RCCE_FLAG    RCCE_sent_flag[RCCE_MAXNP];
extern RCCE_FLAG    RCCE_ready_flag[RCCE_MAXNP];
extern t_vcharp     RCCE_buff_ptr;
extern size_t       RCCE_chunk;
extern t_vcharp     RCCE_flags_start;
#endif

// per-core communication buffers and identification
extern t_vcharp     RCCE_comm_buffer[RCCE_MAXNP];
extern int          RCCE_NP;
extern int          RCCE_BUFF_SIZE;
#ifndef COPPERRIDGE
extern omp_lock_t   RCCE_corelock[RCCE_MAXNP];
extern t_vchar      RC_comm_buffer[RCCE_MAXNP*RCCE_BUFF_SIZE_MAX];
#endif
extern int          RC_MY_COREID;
extern int          RC_COREID[RCCE_MAXNP];
extern double       RC_REFCLOCKGHZ;
extern int          RCCE_IAM;
// debug-output switches
extern int          RCCE_debug_synch;
extern int          RCCE_debug_comm;
extern int          RCCE_debug_debug;
extern int          RCCE_debug_RPC;

#ifdef SINGLEBITFLAGS
extern RCCE_FLAG_LINE RCCE_flags;
extern int            WORDSIZE;
extern int            LEFTMOSTBIT;
RCCE_FLAG_STATUS RCCE_bit_value(t_vcharp, int);
RCCE_FLAG_STATUS RCCE_flip_bit_value(t_vcharp, int);
int              RCCE_write_bit_value(t_vcharp, int, RCCE_FLAG_STATUS);
#endif

extern int       RCCE_comm_init_val;

// internal helpers
void     RCCE_malloc_init(t_vcharp, size_t);
int      RCCE_qsort(char *, size_t, size_t, int (*)(const void*, const void*));
int      id_compare(const void *, const void *);
int      RCCE_probe(RCCE_FLAG);
int      RCCE_error_return(int, int);
void     RC_cache_invalidate(void);
int      RCCE_acquire_lock(int);
int      RCCE_release_lock(int);
int      RCCE_global_color(int, void *);
t_vcharp RC_COMM_BUFFER_START(int);

#ifndef GORY
// simple (non-gory) interface: MPB allocation, put/get, and flag operations
t_vcharp RCCE_malloc(size_t);
t_vcharp RCCE_malloc_request(size_t, size_t *);
void     RCCE_free(t_vcharp);
int      RCCE_put(t_vcharp, t_vcharp, int, int);
int      RCCE_get(t_vcharp, t_vcharp, int, int);
int      RCCE_wait_until(RCCE_FLAG, RCCE_FLAG_STATUS);
int      RCCE_flag_alloc(RCCE_FLAG *);
int      RCCE_flag_free(RCCE_FLAG *);
int      RCCE_flag_write(RCCE_FLAG *, RCCE_FLAG_STATUS, int);
int      RCCE_flag_read(RCCE_FLAG, RCCE_FLAG_STATUS *, int);
#endif

#ifdef _OPENMP
// in the OpenMP emulation, each "core" is a thread: all per-core state
// must be threadprivate
#pragma omp threadprivate (RC_COREID, RC_MY_COREID, RC_REFCLOCKGHZ)
#pragma omp threadprivate (RCCE_comm_buffer)
#pragma omp threadprivate (RCCE_BUFF_SIZE)
#pragma omp threadprivate (RCCE_IAM, RCCE_NP)
#pragma omp threadprivate (RCCE_debug_synch, RCCE_debug_comm, RCCE_debug_debug)
#ifdef SINGLEBITFLAGS
#pragma omp threadprivate (RCCE_flags, WORDSIZE, LEFTMOSTBIT)
#endif
#ifndef GORY
#pragma omp threadprivate (RCCE_sent_flag, RCCE_ready_flag)
#pragma omp threadprivate (RCCE_buff_ptr, RCCE_chunk)
#pragma omp threadprivate (RCCE_flags_start)
#endif
#endif

#endif
GB_unop__identity_fp64_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): comments only added below; regenerate from the Generator
// templates rather than editing this file by hand.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp64_bool
// op(A') function:  GB_unop_tran__identity_fp64_bool

// C type:   double
// A type:   bool
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    bool aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_fp64_bool
(
    double *Cx,         // Cx and Ax may be aliased
    const bool *Ax,     // input values, anz entries
    int64_t anz,
    int nthreads        // # of OpenMP threads for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        bool aij = Ax [p] ;
        double z = (double) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_fp64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numeric phase of the transpose; the template uses the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pr71371.c
/* PR middle-end/71371 */ /* { dg-do compile } */ void baz (int *); void foo (void) { int i; #pragma omp taskloop for (i = 0; i < 100; i++) baz (&i); } void bar (void) { int i; #pragma omp parallel { #pragma omp for for (i = 0; i < 100; i++) baz (&i); } }
GB_binop__pow_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): comments only added below; regenerate from Generator/*
// rather than editing this file by hand.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_int16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_int16)
// C=scalar+B                       GB (_bind1st__pow_int16)
// C=scalar+B'                      GB (_bind1st_tran__pow_int16)
// C=A+scalar                       GB (_bind2nd__pow_int16)
// C=A'+scalar                      GB (_bind2nd_tran__pow_int16)

// C type:     int16_t
// A type:     int16_t
// A pattern?  0
// B type:     int16_t
// B pattern?  0

// BinaryOp: cij = GB_pow_int16 (aij, bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_int16 (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_INT16 || GxB_NO_POW_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion substitutes alpha/beta for entries missing from A/B
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_int16 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_int16 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow_int16 (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__pow_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow_int16 (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
/* Il programma funziona solo de il numero di elementi dell'array è esattamente divisibile per il numero di core. Per esempio con N = 8 e t = 4. */ #include <stdio.h> #include <stdlib.h> #include <omp.h> int main() { int N, i, t, nloc; float sumtot, sum, *a; sumtot = 0; printf("Inserire N: "); scanf("%d", &N); a = (float*)calloc(N,sizeof(float)); //array allocato dinamicamente. printf("Inserire i numeri da sommare: \n"); for(i = 0; i < N; i++) { scanf("%f", &a[i]); } #pragma omp parallel private(sum, nloc, i) shared(sumtot) { t = omp_get_num_threads(); nloc = N/t; printf("Sono il thread %d, di %d: numeri %d\n",omp_get_thread_num(),t,nloc); sum = 0; for(i = 0; i < nloc; i++) { sum = sum+a[i+nloc*omp_get_thread_num()]; } sumtot+=sum; } printf("La somma totale e': %f\n", sumtot); }
hypergraph_cut.h
#ifndef HYPERGRAPHCUT_H #define HYPERGRAPHCUT_H #include "submodular.h" #include <vector> #include "../la/vector.h" template<class DT> class HyperEdge { public: std::vector<int64_t> v; DT w; HyperEdge(std::vector<int64_t> v_in, DT w_in) : v(v_in), w(w_in) {} }; template<class DT> class HyperCut final : public SubmodularFunction<DT> { public: int64_t n; std::vector<HyperEdge<DT>> edges; HyperCut(int64_t n_in) : SubmodularFunction<DT>(n_in), n(n_in) { GeneralizedWattsStrogatz(4, 2, 0.25); } void GeneralizedWattsStrogatz(int64_t k, int64_t r, double beta) { std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<double> weight_dist(0.0, 1.0); std::uniform_real_distribution<double> connect_dist(0.0, 1.0); std::uniform_int_distribution<int64_t> uniform_node(0, n-1); edges.reserve(k*n); //Each vertex is connected to k nearest neighbors //Each hyper edge connects to r vertices for(int64_t i = 0; i < n; i++) { for(int64_t p = 1; p < k/2; p++) { //Construct hyperedge. Connect to current vertex and adjacent vertices std::vector<int64_t> vertices; vertices.reserve(r); vertices.push_back(i); for(int64_t q = 0; q < r; q++) { vertices.push_back((i + p + q) % n); } edges.emplace_back(vertices, weight_dist(gen)); } } //With probability beta, rewire each vertex in edge for(auto e : edges) { for(uint64_t i = 0; i < e.v.size(); i++) { if(connect_dist(gen) < beta) { int64_t new_v = e.v[i]; int64_t k = 0; do { new_v = uniform_node(gen); if(k++ > 100) { //Give up rewiring. 
new_v = e.v[i]; break; } } while(std::find(e.v.begin(), e.v.end(), new_v) != e.v.end()); e.v[i] = new_v; } } } } DT eval(const std::vector<bool>& A) override { DT val = 0.0; #pragma omp parallel for reduction(+ : val) for(uint64_t j = 0; j < edges.size(); j++) { bool is_cut = false; for(uint64_t i = 1; i < edges[j].v.size(); i++) { if(A[edges[j].v[i]] != A[edges[j].v[0]]) { is_cut = true; break; } } if(is_cut) val += edges[j].w; } return val; } void gains(const std::vector<int64_t>& perm, Vector<DT>& x) override { std::vector<int64_t> perm_lookup(n); _Pragma("omp parallel for") for(int64_t i = 0; i < n; i++) { perm_lookup[perm[i]] = i; } x.set_all(0.0); //Iterate over every edge. #pragma omp parallel for for(auto it = edges.begin(); it < edges.end(); it++) { auto & e = *it; //There is a gain when the first vertex is added, //and a loss when the last vertex is added int64_t first = e.v[0]; int64_t last = e.v[0]; for(auto v : e.v) { if(perm_lookup[v] <= perm_lookup[first]) first = v; if(perm_lookup[v] >= perm_lookup[last]) last = v; } #pragma omp atomic x(first) += e.w; #pragma omp atomic x(last) -= e.w; } } }; #endif
ex1-vector-addition-openmp.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #define VALIDATE 0 #if VALIDATE #include "validate.h" #endif void vec_add(const size_t, const int * restrict, const int * restrict, int * restrict); void usage(char**); int main(int argc, char **argv) { int *u,*v,*w; size_t i,n; double t0,t1; if(argc==2) sscanf(argv[1],"%zu",&n); else { usage(argv); return 1; } u = (int*)malloc(n*sizeof(int)); v = (int*)malloc(n*sizeof(int)); w = (int*)malloc(n*sizeof(int)); for(i=0; i<n; ++i) u[i]=v[i]=i; t0 = omp_get_wtime(); vec_add(n,u,v,w); t1 = omp_get_wtime(); #if VALIDATE if(!validate_vec_add(n,u,v,w)) { printf("Validation failed.\n"); return 1; } #endif printf("Total time taken: %f.\n",t1-t0); free(u); free(v); free(w); return 0; } void vec_add(const size_t n, const int * restrict u, const int * restrict v, int * restrict w) { int id,nt; size_t i,is,ie; #pragma omp parallel default(none) shared(u,v,w) private(i,is,ie,id,nt) { id = omp_get_thread_num(); nt = omp_get_num_threads(); is = id*n/nt; ie = (id==nt-1) ? n : (id+1)*n/nt; for(i=is; i<ie; ++i) w[i]=u[i]+v[i]; } } void usage(char **argv) { printf("Usage: %s <length>\n",argv[0]); }
GB_binop__lt_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): comments only added below; regenerate from the Generator
// templates rather than editing this file by hand.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lt_int8
// A.*B function (eWiseMult):       GB_AemultB__lt_int8
// A*D function (colscale):         GB_AxD__lt_int8
// D*A function (rowscale):         GB_DxB__lt_int8
// C+=B function (dense accum):     GB_Cdense_accumB__lt_int8
// C+=b function (dense accum):     GB_Cdense_accumb__lt_int8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lt_int8
// C=scalar+B                       GB_bind1st__lt_int8
// C=scalar+B'                      GB_bind1st_tran__lt_int8
// C=A+scalar                       GB_bind2nd__lt_int8
// C=A'+scalar                      GB_bind2nd_tran__lt_int8

// C type:   bool
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x < y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_INT8 || GxB_NO_LT_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // LT has no accum form: the template is compiled out (#if 0)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lt_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // LT has no accum form: the template is compiled out (#if 0)
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__lt_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lt_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lt_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B, or NULL if B is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__lt_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = (x < aij) ;  \
}

GrB_Info GB_bind1st_tran__lt_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = (aij < y) ;  \
}

GrB_Info GB_bind2nd_tran__lt_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mandelbrot.c
/* * mandelbrot.c: edge detection * (c)2010-2018 Seiji Nishimura * $Id: mandelbrot.c,v 1.1.1.3 2018/09/11 00:00:00 seiji Exp seiji $ */ #include <math.h> #include <pixmap.h> #include <palette.h> #include <stdbool.h> #define ROUND(x) ((int) round(x)) #define MIN(x,y) (((x)<(y))?(x):(y)) #define MAX(x,y) (((x)>(y))?(x):(y)) // prototypes void colormap_init (pixel_t *, int); void draw_image (pixmap_t *, pixmap_t *, pixel_t *, int, double, double, double); void rough_sketch (pixmap_t *, pixel_t *, int, double, double, double); int mandelbrot (int, double, double); bool detect_edge (pixmap_t *, pixel_t *, int, int); bool equivalent_color(pixel_t, pixel_t); //====================================================================== int main(int argc, char **argv) { pixmap_t image, sketch; pixel_t colormap[ITER_MAX]; pixmap_create(&image , WIDTH, HEIGHT); pixmap_create(&sketch, WIDTH, HEIGHT); colormap_init(colormap, ITER_MAX); draw_image(&image, &sketch, colormap, ITER_MAX, CENTER_R, CENTER_I, RADIUS); pixmap_write_ppmfile(&image, "output.pbm"); pixmap_destroy(&sketch); pixmap_destroy(&image ); return 0; } //---------------------------------------------------------------------- void colormap_init(pixel_t *colormap, int iter_max) { int colormap_mask = COLORMAP_CYCLE - 1; colormap[0] = pixel_set_rgb(0x00, 0x00, 0x00); for (int i = 1; i < iter_max; i++) #ifdef REVERSE_COLORMAP colormap[i] = palette(COLORMAP_TYPE, 0x00, colormap_mask, colormap_mask - (i & colormap_mask)); #else colormap[i] = palette(COLORMAP_TYPE, 0x00, colormap_mask, i & colormap_mask ); #endif return; } //---------------------------------------------------------------------- void draw_image(pixmap_t *image, pixmap_t *sketch, pixel_t *colormap, int iter_max, double c_r, double c_i, double radius) { // draw edge image. 
    // Pre-build the two colors used for edge rendering.
    const pixel_t black = pixel_set_rgb(0x00, 0x00, 0x00),
          white = pixel_set_rgb(0xff, 0xff, 0xff);

    // First render the plain Mandelbrot image into 'sketch', then mark
    // every pixel whose 3x3 neighborhood contains a different color.
    rough_sketch(sketch, colormap, iter_max, c_r, c_i, radius);
#pragma omp parallel for schedule(static,1)
    for (int xy = 0; xy < WIDTH * HEIGHT; xy++) {
	// Flattened index -> (x, y); each pixel is independent, so the
	// loop is safe to run in parallel.
	const int x = xy % WIDTH,
	          y = xy / WIDTH;
	bool edge;
	pixel_t pixel;
	// detect_edge also returns this pixel's own color through 'pixel'.
	edge = detect_edge(sketch, &pixel, x, y);
#ifdef USE_MONOCHROME
	pixel = edge ? white : black;	// edges only, on a black background
#else
	pixel = edge ? white : pixel;	// edges drawn over the colored sketch
#endif
	pixmap_put_pixel(image, pixel, x, y);
    }

    return;
}

//----------------------------------------------------------------------
// Render the raw Mandelbrot iteration counts into 'sketch' using the
// colormap.  The view is a square region of complex-plane radius 'radius'
// centered at (c_r, c_i), scaled so the shorter image side spans it.
void rough_sketch(pixmap_t *sketch, pixel_t *colormap, int iter_max,
		  double c_r, double c_i, double radius)
{
    // NOTE(review): 'iter & iter_mask' below assumes iter_max is a power
    // of two so that iter == iter_max (non-escaping points) wraps to
    // colormap[0] (black) -- confirm ITER_MAX is a power of two.
    int iter_mask = iter_max - 1;
    int width, height;
    double d;

    pixmap_get_size(sketch, &width, &height);

    // d = complex-plane distance between adjacent pixels.
    d = 2.0 * radius / MIN(width, height);

#pragma omp parallel for schedule(static,1)
    for (int xy = 0; xy < WIDTH * HEIGHT; xy++) {
	int x = xy % WIDTH,
	    y = xy / WIDTH;
	// Map pixel to complex plane; y grows downward on screen, so the
	// imaginary part is mirrored (height / 2 - y).
	double p_r = c_r + d * (x - width  / 2),
	       p_i = c_i + d * (height / 2 - y);
	int iter = mandelbrot(iter_max, p_r, p_i);
	pixmap_put_pixel(sketch, colormap[iter & iter_mask], x, y);
    }

    return;
}

//----------------------------------------------------------------------
// Mandelbrot escape-time kernel: iterate z <- z^2 + p starting from
// z = p, and return the iteration count at which |z|^2 >= 4.0 (or
// iter_max if the point never escapes within the budget).
int mandelbrot(int iter_max, double p_r, double p_i)
{				// kernel function (scalar version)
    int i;
    double z_r, z_i, work;

    z_r = p_r;
    z_i = p_i;
    // 'work' always carries 2*Re(z)*Im(z) of the current (unsquared) z,
    // saved before the loop condition squares z_r and z_i in place.
    work = 2.0 * z_r * z_i;

    // The condition squares z_r and z_i in place ((z_r *= z_r), etc.),
    // so inside the body z_r and z_i hold Re(z)^2 and Im(z)^2:
    //   new Re(z) = Re(z)^2 - Im(z)^2 + p_r
    //   new Im(z) = 2*Re(z)*Im(z)     + p_i   (taken from 'work')
    for (i = 1; i < iter_max && (z_r *= z_r) + (z_i *= z_i) < 4.0; i++) {
	z_r += p_r - z_i ;
	z_i  = p_i + work;
	work = 2.0 * z_r * z_i;
    }

    return i;
}

//----------------------------------------------------------------------
// Return true if pixel (x, y) differs from any of its (up to 8)
// neighbors; also output the pixel's own color through 'pixel'.
bool detect_edge(pixmap_t *pixmap, pixel_t *pixel, int x, int y)
{
    int width, height;

    pixmap_get_size (pixmap, &width, &height);
    pixmap_get_pixel(pixmap, pixel, x, y);

    // Clamp the 3x3 neighborhood to the image borders.
    for (int j = MAX(0, y - 1); j <= MIN(height - 1, y + 1); j++)
	for (int i = MAX(0, x - 1); i <= MIN(width - 1, x + 1); i++)
	    if (i != x || j != y) {
		pixel_t p;
		pixmap_get_pixel(pixmap, &p, i, j);
		if (!equivalent_color(*pixel, p))
		    return
		    true;
	    }

    return false;
}

//----------------------------------------------------------------------
// Decide whether two pixels count as "the same color" for edge detection.
bool equivalent_color(pixel_t p, pixel_t q)
#ifdef USE_SAME_COLOR
{				// strict mode: exact RGB equality
    return pixel_get_r(p) == pixel_get_r(q) &&
	   pixel_get_g(p) == pixel_get_g(q) &&
	   pixel_get_b(p) == pixel_get_b(q);
}
#else //......................................
{				// tolerant mode: perceptual closeness
    int dr, dg, db;

    // Per-channel differences.
    dr = pixel_get_r(p) - pixel_get_r(q);
    dg = pixel_get_g(p) - pixel_get_g(q);
    db = pixel_get_b(p) - pixel_get_b(q);

    // NOTE(review): the coefficient sets match the BT.601 luma weights
    // (0.299/0.587/0.114), the BT.709 luma weights (0.213/0.715/0.072),
    // and the YIQ I-axis weights (0.596/-0.274/-0.322) -- presumably
    // "close in brightness (by either standard) AND close in chroma";
    // confirm intent of the 1.5 / 4.2 thresholds against the author.
    return (fabs(0.299 * dr + 0.587 * dg + 0.114 * db) < 1.5  ||
	    fabs(0.213 * dr + 0.715 * dg + 0.072 * db) < 1.5) &&
	    fabs(0.596 * dr - 0.274 * dg - 0.322 * db) < 4.2;
}
#endif
sections-4.c
/* PR c++/24613 */ /* { dg-compile } */ #pragma omp section /* { dg-error "may only be used in" } */ int i; void foo (void) { #pragma omp section /* { dg-error "may only be used in" } */ i++; }
utils.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <iostream> #include <vector> #include <string> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #ifdef _WIN32 #define GLOG_NO_ABBREVIATED_SEVERITIES #include <windows.h> #else #include <dirent.h> #include <sys/types.h> #endif namespace PaddleSolution { namespace utils { inline std::string path_join(const std::string& dir, const std::string& path) { std::string seperator = "/"; #ifdef _WIN32 seperator = "\\"; #endif return dir + seperator + path; } #ifndef _WIN32 // scan a directory and get all files with input extensions inline std::vector<std::string> get_directory_images( const std::string& path, const std::string& exts) { std::vector<std::string> imgs; struct dirent *entry; DIR *dir = opendir(path.c_str()); if (dir == NULL) { closedir(dir); return imgs; } while ((entry = readdir(dir)) != NULL) { std::string item = entry->d_name; auto ext = strrchr(entry->d_name, '.'); if (!ext || std::string(ext) == "." 
|| std::string(ext) == "..") { continue; } if (exts.find(ext) != std::string::npos) { imgs.push_back(path_join(path, entry->d_name)); } } return imgs; } #else // scan a directory and get all files with input extensions inline std::vector<std::string> get_directory_images( const std::string& path, const std::string& exts) { std::string pattern(path); pattern.append("\\*"); std::vector<std::string> imgs; WIN32_FIND_DATA data; HANDLE hFind; if ((hFind = FindFirstFile(pattern.c_str(), &data)) != INVALID_HANDLE_VALUE) { do { auto fname = std::string(data.cFileName); auto pos = fname.rfind("."); auto ext = fname.substr(pos + 1); if (ext.size() > 1 && exts.find(ext) != std::string::npos) { imgs.push_back(path + "\\" + data.cFileName); } } while (FindNextFile(hFind, &data) != 0); FindClose(hFind); } return imgs; } #endif // normalize and HWC_BGR -> CHW_RGB inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean, std::vector<float>& fstd) { int rh = im.rows; int rw = im.cols; int rc = im.channels(); double normf = static_cast<double>(1.0) / 255.0; #pragma omp parallel for for (int h = 0; h < rh; ++h) { const uchar* ptr = im.ptr<uchar>(h); int im_index = 0; for (int w = 0; w < rw; ++w) { for (int c = 0; c < rc; ++c) { int top_index = (c * rh + h) * rw + w; float pixel = static_cast<float>(ptr[im_index++]); pixel = (pixel * normf - fmean[c]) / fstd[c]; data[top_index] = pixel; } } } } // flatten a cv::mat inline void flatten_mat(cv::Mat& im, float* data) { int rh = im.rows; int rw = im.cols; int rc = im.channels(); #pragma omp parallel for for (int h = 0; h < rh; ++h) { const uchar* ptr = im.ptr<uchar>(h); int im_index = 0; int top_index = h * rw * rc; for (int w = 0; w < rw; ++w) { for (int c = 0; c < rc; ++c) { float pixel = static_cast<float>(ptr[im_index++]); data[top_index++] = pixel; } } } } // argmax inline void argmax(float* out, std::vector<int>& shape, std::vector<uchar>& mask, std::vector<uchar>& scoremap) { int out_img_len = shape[1] * shape[2]; 
int blob_out_len = out_img_len * shape[0]; float max_value = -1; int label = 0; #pragma omp parallel private(label) for (int i = 0; i < out_img_len; ++i) { max_value = -1; label = 0; #pragma omp for reduction(max : max_value) for (int j = 0; j < shape[0]; ++j) { int index = i + j * out_img_len; if (index >= blob_out_len) { continue; } float value = out[index]; if (value > max_value) { max_value = value; label = j; } } if (label == 0) max_value = 0; mask[i] = uchar(label); scoremap[i] = uchar(max_value * 255); } } } // namespace utils } // namespace PaddleSolution
GB_unaryop__ainv_int8_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int8_uint64 // op(A') function: GB_tran__ainv_int8_uint64 // C type: int8_t // A type: uint64_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int8_uint64 ( int8_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int8_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_binop__isne_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_int16) // A.*B function (eWiseMult): GB (_AemultB_08__isne_int16) // A.*B function (eWiseMult): GB (_AemultB_02__isne_int16) // A.*B function (eWiseMult): GB (_AemultB_04__isne_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int16) // A*D function (colscale): GB (_AxD__isne_int16) // D*A function (rowscale): GB (_DxB__isne_int16) // C+=B function (dense accum): GB (_Cdense_accumB__isne_int16) // C+=b function (dense accum): GB (_Cdense_accumb__isne_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int16) // C=scalar+B GB (_bind1st__isne_int16) // C=scalar+B' GB (_bind1st_tran__isne_int16) // C=A+scalar GB (_bind2nd__isne_int16) // C=A'+scalar GB (_bind2nd_tran__isne_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT16 || GxB_NO_ISNE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isne_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isne_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t 
*restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isne_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isne_int16) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isne_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__isne_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__isne_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_split_full_template.c
//------------------------------------------------------------------------------ // GB_split_full_template: split a full matrix into a full tile //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { //-------------------------------------------------------------------------- // get C and the tile A //-------------------------------------------------------------------------- const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ; GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; int64_t pC ; #pragma omp parallel for num_threads(C_nthreads) schedule(static) for (pC = 0 ; pC < cnz ; pC++) { int64_t i = pC % cvlen ; int64_t j = pC / cvlen ; int64_t iA = aistart + i ; int64_t jA = avstart + j ; int64_t pA = iA + jA * avlen ; // Cx [pC] = Ax [pA] ; GB_COPY (pC, pA) ; } done = true ; } #undef GB_CTYPE
// tinyexr.h
#ifndef TINYEXR_H_ #define TINYEXR_H_ /* Copyright (c) 2014 - 2021, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #include <stdint.h> // guess stdint.h is available(C99) #ifdef __cplusplus extern "C" { #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) #define TINYEXR_X86_OR_X64_CPU 1 #else #define TINYEXR_X86_OR_X64_CPU 0 #endif #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU #define TINYEXR_LITTLE_ENDIAN 1 #else #define TINYEXR_LITTLE_ENDIAN 0 #endif // Use miniz or not to decode ZIP format pixel. Linking with zlib // required if this flas is 0. #ifndef TINYEXR_USE_MINIZ #define TINYEXR_USE_MINIZ (1) #endif // Disable PIZ comporession when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) #endif #ifndef TINYEXR_USE_ZFP #define TINYEXR_USE_ZFP (0) // TinyEXR extension. // http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_THREAD #define TINYEXR_USE_THREAD (0) // No threaded loading. 
// http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_OPENMP #ifdef _OPENMP #define TINYEXR_USE_OPENMP (1) #else #define TINYEXR_USE_OPENMP (0) #endif #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-6) #define TINYEXR_ERROR_CANT_OPEN_FILE (-7) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8) #define TINYEXR_ERROR_INVALID_HEADER (-9) #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10) #define TINYEXR_ERROR_CANT_WRITE_FILE (-11) #define TINYEXR_ERROR_SERIALZATION_FAILED (-12) #define TINYEXR_ERROR_LAYER_NOT_FOUND (-13) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024) #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 // tile format image; // not zero for only a single-part "normal" tiled file (according to spec.) 
int tiled; int long_name; // long name attribute // deep image(EXR 2.0); // for a multi-part file, indicates that at least one part is of type deep* (according to spec.) int non_image; int multipart; // multi-part(EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. char type[256]; unsigned char *value; // uint8_t* int size; int pad0; } EXRAttribute; typedef struct _EXRChannelInfo { char name[256]; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } EXRChannelInfo; typedef struct _EXRTile { int offset_x; int offset_y; int level_x; int level_y; int width; // actual width in a tile. int height; // actual height int a tile. unsigned char **images; // image[channels][pixels] } EXRTile; typedef struct _EXRBox2i { int min_x; int min_y; int max_x; int max_y; } EXRBox2i; typedef struct _EXRHeader { float pixel_aspect_ratio; int line_order; EXRBox2i data_window; EXRBox2i display_window; float screen_window_center[2]; float screen_window_width; int chunk_count; // Properties for tiled format(`tiledesc`). int tiled; int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; int long_name; // for a single-part file, agree with the version field bit 11 // for a multi-part file, it is consistent with the type of part int non_image; int multipart; unsigned int header_len; // Custom attributes(exludes required attributes(e.g. `channels`, // `compression`, etc) int num_custom_attributes; EXRAttribute *custom_attributes; // array of EXRAttribute. size = // `num_custom_attributes`. EXRChannelInfo *channels; // [num_channels] int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for // each channel. This is overwritten with `requested_pixel_types` when // loading. 
int num_channels; int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*) int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Meomory|File), then users // can edit it(only valid for HALF pixel type // channel) // name attribute required for multipart files; // must be unique and non empty (according to spec.); // use EXRSetNameAttr for setting value; // max 255 character allowed - excluding terminating zero char name[256]; } EXRHeader; typedef struct _EXRMultiPartHeader { int num_headers; EXRHeader *headers; } EXRMultiPartHeader; typedef struct _EXRImage { EXRTile *tiles; // Tiled pixel data. The application must reconstruct image // from tiles manually. NULL if scanline format. struct _EXRImage* next_level; // NULL if scanline format or image is the last level. int level_x; // x level index int level_y; // y level index unsigned char **images; // image[channels][pixels]. NULL if tiled format. int width; int height; int num_channels; // Properties for tile format. int num_tiles; } EXRImage; typedef struct _EXRMultiPartImage { int num_images; EXRImage *images; } EXRMultiPartImage; typedef struct _DeepImage { const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int num_channels; int width; int height; int pad0; } DeepImage; // @deprecated { For backward compatibility. Not recommended to use. } // Loads single-frame OpenEXR image. Assume EXR image contains A(single channel // alpha) or RGB(A) channels. // Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x hight // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // Loads single-frame OpenEXR image by specifying layer name. Assume EXR image // contains A(single channel alpha) or RGB(A) channels. 
Application must free // image data as returned by `out_rgba` Result image format is: float x RGBA x // width x hight Returns negative value and may set error string in `err` when // there's an error When the specified layer name is not found in the EXR file, // the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`. extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layer_name, const char **err); // // Get layer infos from EXR file. // // @param[out] layer_names List of layer names. Application must free memory // after using this. // @param[out] num_layers The number of layers // @param[out] err Error string(will be filled when the function returns error // code). Free it using FreeEXRErrorMessage after using this value. // // @return TINYEXR_SUCCEES upon success. // extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err); // @deprecated { to be removed. } // Simple wrapper API for ParseEXRHeaderFromFile. // checking given file is a EXR file(by just look up header) // @return TINYEXR_SUCCEES for EXR image, TINYEXR_ERROR_INVALID_HEADER for // others extern int IsEXR(const char *filename); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // hight` // Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero // value. // Save image as fp32(FLOAT) format when `save_as_fp16` is 0. // Use ZIP compression by default. 
// Returns negative value and may set error string in `err` when there's an // error extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename, const char **err); // Returns the number of resolution levels of the image (including the base) extern int EXRNumLevels(const EXRImage* exr_image); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Set name attribute of EXRHeader struct (it makes a copy) extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Frees internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Frees internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Frees error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. 
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Saves multi-channel, multi-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // File global attributes (eg. display_window) must be set in the first header. 
// Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRMultipartImageToFile(const EXRImage *images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err); // Saves multi-channel, multi-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // File global attributes (eg. display_window) must be set in the first header. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images, const EXRHeader **exr_headers, unsigned int num_parts, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. 
// Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEFINED #define TINYEXR_IMPLEMENTATION_DEFINED #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #ifndef NOMINMAX #define NOMINMAX #endif #include <windows.h> // for UTF-8 #endif #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> // #include <iostream> // debug #include <limits> #include <string> #include <vector> #include <set> // https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support #if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900) #define TINYEXR_HAS_CXX11 (1) // C++11 #include <cstdint> #if TINYEXR_USE_THREAD #include <atomic> #include <thread> #endif #endif // __cplusplus > 199711L #if TINYEXR_USE_OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #include <miniz/miniz.h> #else // Issue #46. Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Weverything" #endif #include "zfp.h" #ifdef __clang__ #pragma clang diagnostic pop #endif #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. 
#ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char 
*>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef __GNUC__ #pragma GCC diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(int *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap4(float *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else float tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef TINYEXR_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 union 
FP32 { unsigned int u; float f; struct { #if TINYEXR_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" #endif union FP16 { unsigned short u; struct { #if TINYEXR_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic pop #endif static FP32 half_to_float(FP16 h) { static const FP32 magic = {113 << 23}; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? { o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000U) << 16U; // sign bit return o; } static FP16 float_to_half_full(FP32 f) { FP16 o = {0}; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 
0x200 : 0; // NaN->qNaN and Inf->Inf } else // Normalized number { // Exponent unbias the single, then bias the halfp int newexp = f.s.Exponent - 127 + 15; if (newexp >= 31) // Overflow, return signed infinity o.s.Exponent = 31; else if (newexp <= 0) // Underflow { if ((14 - newexp) <= 24) // Mantissa might be non-zero { unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit o.s.Mantissa = mant >> (14 - newexp); if ((mant >> (13 - newexp)) & 1) // Check for rounding o.u++; // Round, might overflow into exp bit, but this is OK } } else { o.s.Exponent = static_cast<unsigned int>(newexp); o.s.Mantissa = f.s.Mantissa >> 13; if (f.s.Mantissa & 0x1000) // Check for rounding o.u++; // Round, might overflow to inf, this is OK } } o.s.Sign = f.s.Sign; return o; } // NOTE: From OpenEXR code // #define IMF_INCREASING_Y 0 // #define IMF_DECREASING_Y 1 // #define IMF_RAMDOM_Y 2 // // #define IMF_NO_COMPRESSION 0 // #define IMF_RLE_COMPRESSION 1 // #define IMF_ZIPS_COMPRESSION 2 // #define IMF_ZIP_COMPRESSION 3 // #define IMF_PIZ_COMPRESSION 4 // #define IMF_PXR24_COMPRESSION 5 // #define IMF_B44_COMPRESSION 6 // #define IMF_B44A_COMPRESSION 7 #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif static const char *ReadString(std::string *s, const char *ptr, size_t len) { // Read untile NULL(\0). const char *p = ptr; const char *q = ptr; while ((size_t(q - ptr) < len) && (*q) != 0) { q++; } if (size_t(q - ptr) >= len) { (*s) = std::string(); return NULL; } (*s) = std::string(p, q); return q + 1; // skip '\0' } static bool ReadAttribute(std::string *name, std::string *type, std::vector<unsigned char> *data, size_t *marker_size, const char *marker, size_t size) { size_t name_len = strnlen(marker, size); if (name_len == size) { // String does not have a terminating character. 
return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(&outLen); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int requested_pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { int min_x; int min_y; int max_x; int max_y; } Box2iInfo; struct HeaderInfo { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; Box2iInfo data_window; int line_order; Box2iInfo display_window; float screen_window_center[2]; float screen_window_width; float 
pixel_aspect_ratio; int chunk_count; // Tiled format int tiled; // Non-zero if the part is tiled. int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; // required for multi-part or non-image files std::string name; // required for multi-part or non-image files std::string type; void clear() { channels.clear(); attributes.clear(); data_window.min_x = 0; data_window.min_y = 0; data_window.max_x = 0; data_window.max_y = 0; line_order = 0; display_window.min_x = 0; display_window.min_y = 0; display_window.max_x = 0; display_window.max_y = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tiled = 0; tile_size_x = 0; tile_size_y = 0; tile_level_mode = 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; name.clear(); type.clear(); } }; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. 
return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(&info.pixel_type); tinyexr::swap4(&info.x_sampling); tinyexr::swap4(&info.y_sampling); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].requested_pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(&pixel_type); tinyexr::swap4(&x_sampling); tinyexr::swap4(&y_sampling); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. 
// const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // mz_ulong outSize = mz_compressBound(src_size); int ret = mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return true; } std::vector<unsigned char> tmpBuf(*uncompressed_size); #if TINYEXR_USE_MINIZ int ret = mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (MZ_OK != ret) { return false; } #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (Z_OK != ret) { return false; } #endif // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. 
{
    // Undo the delta encoding applied by CompressZip.
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    // Re-interleave: alternate bytes from the two halves of tmpBuf.
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
// static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressible run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressable run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the oncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength. // static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; // Fixes #116: Add bounds check to in buffer. 
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Non-negative count: (*in + 1) copies of the next byte.
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

// RLE ("RLE") compression with the same interleave + delta-predictor
// preprocessing as CompressZip. Falls back to a raw copy when compression
// does not shrink the data.
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    // De-interleave: even bytes to the first half, odd bytes to the second.
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    // Delta-encode each byte against its predecessor (biased).
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inverse of CompressRle: RLE-decode, undo the predictor, re-interleave.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    // Undo the delta encoding.
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    // Re-interleave the two halves back into `dst`.
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif

#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC) // (3 clause BSD license) // struct PIZChannelData { unsigned short *start; unsigned short *end; int nx; int ny; int ys; int size; }; //----------------------------------------------------------------------------- // // 16-bit Haar Wavelet encoding and decoding // // The source code in this file is derived from the encoding // and decoding routines written by Christian Rouet for his // PIZ image file format. // //----------------------------------------------------------------------------- // // Wavelet basis functions without modulo arithmetic; they produce // the best compression ratios when the wavelet-transformed data are // Huffman-encoded, but the wavelet transform works only for 14-bit // data (untransformed data values must be less than (1 << 14)). // inline void wenc14(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { short as = static_cast<short>(a); short bs = static_cast<short>(b); short ms = (as + bs) >> 1; short ds = as - bs; l = static_cast<unsigned short>(ms); h = static_cast<unsigned short>(ds); } inline void wdec14(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { short ls = static_cast<short>(l); short hs = static_cast<short>(h); int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); short as = static_cast<short>(ai); short bs = static_cast<short>(ai - hi); a = static_cast<unsigned short>(as); b = static_cast<unsigned short>(bs); } // // Wavelet basis functions with modulo arithmetic; they work with full // 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't // compress the data quite as well. 
// const int NBITS = 16; const int A_OFFSET = 1 << (NBITS - 1); const int M_OFFSET = 1 << (NBITS - 1); const int MOD_MASK = (1 << NBITS) - 1; inline void wenc16(unsigned short a, unsigned short b, unsigned short &l, unsigned short &h) { int ao = (a + A_OFFSET) & MOD_MASK; int m = ((ao + b) >> 1); int d = ao - b; if (d < 0) m = (m + M_OFFSET) & MOD_MASK; d &= MOD_MASK; l = static_cast<unsigned short>(m); h = static_cast<unsigned short>(d); } inline void wdec16(unsigned short l, unsigned short h, unsigned short &a, unsigned short &b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; b = static_cast<unsigned short>(bb); a = static_cast<unsigned short>(aa); } // // 2D Wavelet encoding: // static void wav2Encode( unsigned short *in, // io: values are transformed in place int nx, // i : x size int ox, // i : x offset int ny, // i : y size int oy, // i : y offset unsigned short mx) // i : maximum in[x][y] value { bool w14 = (mx < (1 << 14)); int n = (nx > ny) ? 
ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ?
ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- unsigned int len : 8; // code length 0 unsigned int lit : 24; // lit p size unsigned int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. 
// for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) // // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. 
// std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. 
// std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. // hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. 
// hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode >= ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } 
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      // Reject runs that would write past the declared table range.
      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   unfrequent;
// - decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
// // invalidTableEntry(); return false; } pl->lit++; if (pl->p) { unsigned int *p = pl->p; pl->p = new unsigned int[pl->lit]; for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i]; delete[] p; } else { pl->p = new unsigned int[1]; } pl->p[pl->lit - 1] = im; } else if (l) { // // Short code: init all primary entries // HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) { // // Error: a short code or a long code has // already been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->len = l; pl->lit = im; } } } return true; } // // Free the long code entries of a decoding table built by hufBuildDecTable() // static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table { for (int i = 0; i < HUF_DECSIZE; i++) { if (hdecod[i].p) { delete[] hdecod[i].p; hdecod[i].p = 0; } } } // // ENCODING // inline void outputCode(long long code, long long &c, int &lc, char *&out) { outputBits(hufLength(code), hufCode(code), c, lc, out); } inline void sendCode(long long sCode, int runCount, long long runCode, long long &c, int &lc, char *&out) { // // Output a run of runCount instances of the symbol sCount. // Output the symbols explicitly, or if that is shorter, output // the sCode symbol once followed by a runCode symbol and runCount // expressed as an 8-bit number. 
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. // #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ /* TinyEXR issue 160. 
in + 1 -> in */
      if (in >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }

    lc -= 8;

    unsigned char cs = (c >> lc);

    if (out + cs > oe) return false;

    // Bounds check for safety
    // Issue 100.
    if ((out - 1) < ob) return false;
    unsigned short s = out[-1];

    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif

//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//

static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in bytes)
                      unsigned short *out)  //  o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;          // begin
  unsigned short *oe = out + no;       // end
  const char *ie = in + (ni + 7) / 8;  // input byte size

  //
  // Loop on input bytes
  //

  while (in < ie) {
    getChar(c, lc, in);

    //
    // Access decoding table
    //

    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];

      if (pl.len) {
        //
        // Get short code
        //

        lc -= pl.len;
        // std::cout << "lit = " << pl.lit << std::endl;
        // std::cout << "rlc = " << rlc << std::endl;
        // std::cout << "c = " << c << std::endl;
        // std::cout << "lc = " << lc << std::endl;
        // std::cout << "in = " << in << std::endl;
        // std::cout << "out = " << out << std::endl;
        // std::cout << "oe = " << oe << std::endl;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code

        //
        // Search long code
        //

        int j;

        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);

          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);

          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //

              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return
false;
              }
              break;
            }
          }
        }

        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }

  //
  // Get remaining (short) codes
  //

  int i = (8 - ni) & 7;

  c >>= i;
  lc -= i;

  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];

    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }

  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();

  return true;
}

// Histogram the 16-bit symbols of `data` into `freq` (HUF_ENCSIZE slots).
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;

  for (int i = 0; i < n; ++i) ++freq[data[i]];
}

// Store `i` into `buf` in little-endian byte order.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;

  b[0] = i;
  b[1] = i >> 8;
  b[2] = i >> 16;
  b[3] = i >> 24;
}

// Read a little-endian 32-bit unsigned value from `buf`.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;

  return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
         ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}

//
// EXTERNAL INTERFACE
//

// Huffman-compress `nRaw` 16-bit values into `compressed`.
// Layout: 20-byte header (im, iM, tableLength, nBits, reserved) followed by
// the packed code-length table and the encoded bit stream.
// Returns the total compressed size in bytes.
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  std::vector<long long> freq(HUF_ENCSIZE);

  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);

  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);

  int tableLength = tableEnd - tableStart;

  char *dataStart = tableEnd;
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;

  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return dataStart + data_length - compressed;
}

// Inverse of hufCompress: decode `nCompressed` bytes into `raw`
// (`raw` must already be sized to the expected symbol count).
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0)
{
    // NOTE(review): both branches return false here, so an empty input with
    // an empty expected output is reported as failure; the OpenEXR original
    // treats that combination as success — confirm against callers.
    if (raw->size() != 0) return false;

    return false;
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //

  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                      &freq.at(0));

    {
      // Declared bit count must fit in the remaining compressed bytes.
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }

      hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
      hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
                raw->data());
    }
    // catch (...)
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), 
&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSizeInBytes, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSizeInBytes) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSizeInBytes / sizeof(unsigned short)); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < 
cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSizeInBytes / sizeof(unsigned short))); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; unsigned int precision; unsigned int __pad0; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* unsigned int __pad1; ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0; } }; static bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes, std::string *err) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) { if (attributes[i].size == 1) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; break; } else { if (err) { (*err) += "zfpCompressionType attribute must be uchar(1 byte) type.\n"; } return false; } } } if (!foundType) { if (err) { (*err) += "`zfpCompressionType` attribute not found.\n"; } return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionRate` attribute not found.\n"; } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if 
((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionPrecision` attribute not found.\n"; } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionTolerance` attribute not found.\n"; } } else { if (err) { (*err) += "Unknown value specified for `zfpCompressionType`.\n"; } } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, size_t num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = size_t(dst_width) * size_t(dst_num_lines) * num_channels; if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). 
memcpy(dst, src, src_size); } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, static_cast<unsigned int>(dst_width), static_cast<unsigned int>(dst_num_lines) * static_cast<unsigned int>(num_channels)); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = size_t(dst_width) * size_t(dst_num_lines); for (size_t c = 0; c < size_t(num_channels); c++) { // decompress 4x4 pixel block. for (size_t y = 0; y < size_t(dst_num_lines); y += 4) { for (size_t x = 0; x < size_t(dst_width); x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. 
// ZFP-compress `width` x `num_lines` x `num_channels` float pixels from
// `inPtr` into `outBuf`. Width and line count must be multiples of 4
// (zfp 4x4 block geometry); returns false otherwise. `outBuf` is resized
// to zfp's worst-case size; `*outSize` receives the actual compressed
// byte count.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
                        unsigned int *outSize, const float *inPtr, int width,
                        int num_lines, int num_channels,
                        const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
    return false;
  }

  // create input array.
  field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
                       zfp_type_float, static_cast<unsigned int>(width),
                       static_cast<unsigned int>(num_lines * num_channels));

  zfp = zfp_stream_open(NULL);

  // Configure the stream according to the attribute-selected mode.
  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  outBuf->resize(buf_size);

  bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_field_free(field);

  size_t image_size = size_t(width) * size_t(num_lines);

  for (size_t c = 0; c < size_t(num_channels); c++) {
    // compress 4x4 pixel block.
    for (size_t y = 0; y < size_t(num_lines); y += 4) {
      for (size_t x = 0; x < size_t(width); x += 4) {
        float fblock[16];
        for (size_t j = 0; j < 4; j++) {
          for (size_t i = 0; i < 4; i++) {
            fblock[j * 4 + i] =
                inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
          }
        }
        zfp_encode_block_float_2(zfp, fblock);
      }
    }
  }

  // Flush pending bits before querying the final compressed size.
  zfp_stream_flush(zfp);
  (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));

  zfp_stream_close(zfp);
  // NOTE(review): the bitstream handle from stream_open() is not
  // stream_close()'d here, unlike DecompressZfp — possible leak; confirm
  // against the zfp API's ownership rules.

  return true;
}

#endif

//
// -----------------------------------------------------------------
//

// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)

// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); if (!ret) { return false; } // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * 
static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { 
image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle( reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... 
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int 
**>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; std::string e; if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes, int(num_attributes), &e)) { // This code path should not be reachable. assert(0); return false; } // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < 
static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aliged. 
use byte-wise copy for safety.#76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupsed data? 
            return false;
          }
          unsigned int val;
          // Byte-wise copy: source may be unaligned. Stored big-endian;
          // swap4 converts to host order (no-op on big-endian builds).
          tinyexr::cpy4(&val, line_ptr + u);
          tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
          outLine[u] = val;
        }
      }
    }
  }
}

return true;
}

// Decode one tile's worth of pixel data.
// Computes the tile's actual pixel dimensions (edge tiles may be smaller
// than tile_size_x/y), writes them to *width / *height, then delegates the
// actual decompression to DecodePixelData with the tile treated as a small
// standalone image (y = 0, line_no = 0, stride = tile_size_x).
// Returns false when the tile offsets lie outside the current level's
// data window, or when DecodePixelData fails.
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // Here, data_width and data_height are the dimensions of the current (sub)level.
  if (tile_size_x * tile_offset_x > data_width ||
      tile_size_y * tile_offset_y > data_height) {
    return false;
  }

  // Compute actual image size in a tile.
  // Edge tiles are clipped to the remaining data-window extent.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }

  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}

// Compute the byte layout of one pixel across all channels.
// On return:
//   (*channel_offset_list)[c] = byte offset of channel c within a scanline
//                               group (channels are stored planar per line),
//   (*pixel_data_size)        = total bytes per pixel over all channels,
//   (*channel_offset)         = running total, equals pixel_data_size.
// Returns false on an unknown pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));

  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      (*pixel_data_size) += sizeof(unsigned short);
      (*channel_offset) += sizeof(unsigned short);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      (*pixel_data_size) += sizeof(float);
      (*channel_offset) += sizeof(float);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      (*pixel_data_size) += sizeof(unsigned int);
      (*channel_offset) += sizeof(unsigned int);
    } else {
      // Unknown/unsupported pixel type.
      return false;
    }
  }
  return true;
}

// Allocate one output buffer per channel for a data_width x data_height
// image. Element size follows requested_pixel_types for HALF source
// channels (HALF or FLOAT), and the source pixel type otherwise.
// NOTE(review): malloc results are not checked here (matches the
// surrounding code's style); callers appear to assume allocation succeeds.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // pixel_data_size += sizeof(unsigned short);
      // channel_offset += sizeof(unsigned short);

      // Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } #ifdef _WIN32 static inline std::wstring UTF8ToWchar(const std::string &str) { int wstr_size = MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0); std::wstring wstr(wstr_size, 0); MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0], (int)wstr.size()); return wstr; } #endif static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; bool has_name = false; bool has_type = false; info->name.clear(); info->type.clear(); info->data_window.min_x = 0; info->data_window.min_y = 0; info->data_window.max_x = 0; info->data_window.max_y = 0; info->line_order = 0; // @fixme info->display_window.min_x = 0; info->display_window.min_y = 0; info->display_window.max_x = 0; info->display_window.max_y = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tiled = 0; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; // For a multipart file, the version field 9th bit is 0. 
if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) || y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) { if (err) { (*err) = "Tile sizes were invalid."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; info->tiled = 1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if 
(err) { (*err) += "# of channels is zero.\n"; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->data_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->data_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->data_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->data_window.min_x); tinyexr::swap4(&info->data_window.min_y); tinyexr::swap4(&info->data_window.max_x); tinyexr::swap4(&info->data_window.max_y); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->display_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->display_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->display_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->display_window.min_x); tinyexr::swap4(&info->display_window.min_y); tinyexr::swap4(&info->display_window.max_x); tinyexr::swap4(&info->display_window.max_y); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4(&info->pixel_aspect_ratio); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4(&info->screen_window_center[0]); tinyexr::swap4(&info->screen_window_center[1]); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { 
memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4(&info->screen_window_width); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(&info->chunk_count); } } else if (attr_name.compare("name") == 0) { if (!data.empty() && data[0]) { data.push_back(0); size_t len = strlen(reinterpret_cast<const char*>(&data[0])); info->name.resize(len); info->name.assign(reinterpret_cast<const char*>(&data[0]), len); has_name = true; } } else if (attr_name.compare("type") == 0) { if (!data.empty() && data[0]) { data.push_back(0); size_t len = strlen(reinterpret_cast<const char*>(&data[0])); info->type.resize(len); info->type.assign(reinterpret_cast<const char*>(&data[0]), len); has_type = true; } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." 
<< std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (version->multipart || version->non_image) { if (!has_name) { ss_err << "\"name\" attribute not found in the header." << std::endl; } if (!has_type) { ss_err << "\"type\" attribute not found in the header." << std::endl; } } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window.min_x = info.display_window.min_x; exr_header->display_window.min_y = info.display_window.min_y; exr_header->display_window.max_x = info.display_window.max_x; exr_header->display_window.max_y = info.display_window.max_y; exr_header->data_window.min_x = info.data_window.min_x; exr_header->data_window.min_y = info.data_window.min_y; exr_header->data_window.max_x = info.data_window.max_x; exr_header->data_window.max_y = info.data_window.max_y; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tiled = info.tiled; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = 
info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; EXRSetNameAttr(exr_header, info.name.c_str()); if (!info.type.empty()) { if (info.type == "scanlineimage") { assert(!exr_header->tiled); } else if (info.type == "tiledimage") { assert(exr_header->tiled); } else if (info.type == "deeptile") { exr_header->non_image = 1; assert(exr_header->tiled); } else if (info.type == "deepscanline") { exr_header->non_image = 1; assert(!exr_header->tiled); } else { assert(false); } } exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy pointer exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } struct OffsetData { OffsetData() : num_x_levels(0), num_y_levels(0) {} std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets; int num_x_levels; int 
num_y_levels; }; int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) { switch (tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: return 0; case TINYEXR_TILE_MIPMAP_LEVELS: return lx; case TINYEXR_TILE_RIPMAP_LEVELS: return lx + ly * num_x_levels; default: assert(false); } return 0; } static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) { assert(level >= 0); int b = (int)(1u << (unsigned)level); int level_size = toplevel_size / b; if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size) level_size += 1; return std::max(level_size, 1); } static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header, const OffsetData& offset_data, const std::vector<size_t>& channel_offset_list, int pixel_data_size, const unsigned char* head, const size_t size, std::string* err) { int num_channels = exr_header->num_channels; int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); int num_y_tiles = (int)offset_data.offsets[level_index].size(); assert(num_y_tiles); int num_x_tiles = (int)offset_data.offsets[level_index][0].size(); assert(num_x_tiles); int num_tiles = num_x_tiles * num_y_tiles; int err_code = TINYEXR_SUCCESS; enum { EF_SUCCESS = 0, EF_INVALID_DATA = 1, EF_INSUFFICIENT_DATA = 2, EF_FAILED_TO_DECODE = 4 }; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<unsigned> error_flag(EF_SUCCESS); #else unsigned error_flag(EF_SUCCESS); #endif // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...", // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window. 
#if 0 if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) && exr_image->level_x == 0 && exr_image->level_y == 0) { if (err) { (*err) += "Failed to decode tile data.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; } #endif exr_image->tiles = static_cast<EXRTile*>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> tile_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_tiles)) { num_threads = int(num_tiles); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int tile_idx = 0; while ((tile_idx = tile_count++) < num_tiles) { #else #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) { #endif // Allocate memory for each tile. exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); int x_tile = tile_idx % num_x_tiles; int y_tile = tile_idx / num_x_tiles; // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile]; if (offset + sizeof(int) * 5 > size) { // Insufficient data size. error_flag |= EF_INSUFFICIENT_DATA; continue; } size_t data_size = size_t(size - (offset + sizeof(int) * 5)); const unsigned char* data_ptr = reinterpret_cast<const unsigned char*>(head + offset); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(&tile_coordinates[0]); tinyexr::swap4(&tile_coordinates[1]); tinyexr::swap4(&tile_coordinates[2]); tinyexr::swap4(&tile_coordinates[3]); if (tile_coordinates[2] != exr_image->level_x) { // Invalid data. 
error_flag |= EF_INVALID_DATA; continue; } if (tile_coordinates[3] != exr_image->level_y) { // Invalid data. error_flag |= EF_INVALID_DATA; continue; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(&data_len); if (data_len < 2 || size_t(data_len) > data_size) { // Insufficient data size. error_flag |= EF_INSUFFICIENT_DATA; continue; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; bool ret = tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, exr_image->width, exr_image->height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); if (!ret) { // Failed to decode tile data. error_flag |= EF_FAILED_TO_DECODE; } exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } // num_thread loop for (auto& t : workers) { t.join(); } #else } // parallel for #endif // Even in the event of an error, the reserved memory may be freed. 
exr_image->num_channels = num_channels; exr_image->num_tiles = static_cast<int>(num_tiles); if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA; if (err) { if (error_flag & EF_INSUFFICIENT_DATA) { (*err) += "Insufficient data length.\n"; } if (error_flag & EF_FAILED_TO_DECODE) { (*err) += "Failed to decode tile data.\n"; } } return err_code; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const OffsetData& offset_data, const unsigned char *head, const size_t size, std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, exr_header->custom_attributes, int(exr_header->num_custom_attributes), err)) { return TINYEXR_ERROR_INVALID_HEADER; } #endif } if (exr_header->data_window.max_x < exr_header->data_window.min_x || exr_header->data_window.max_y < exr_header->data_window.min_y) { if (err) { (*err) += "Invalid data window.\n"; } return TINYEXR_ERROR_INVALID_DATA; } int data_width = exr_header->data_window.max_x - exr_header->data_window.min_x + 1; int data_height = exr_header->data_window.max_y - exr_header->data_window.min_y + 1; // Do not allow too large data_width and data_height. header invalid? { if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) { if (err) { std::stringstream ss; ss << "data_with or data_height too large. 
data_width: " << data_width << ", " << "data_height = " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } if (exr_header->tiled) { if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) { if (err) { std::stringstream ss; ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x << ", " << "tile height = " << exr_header->tile_size_y << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } } const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); #else bool invalid_data(false); #endif if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) { EXRImage* level_image = NULL; for (int level = 0; level < offset_data.num_x_levels; ++level) { if (!level_image) { level_image = exr_image; } else { level_image->next_level = new EXRImage; InitEXRImage(level_image->next_level); level_image = level_image->next_level; } level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, 
exr_header->tile_rounding_mode); level_image->height = LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode); level_image->level_x = level; level_image->level_y = level; int ret = DecodeTiledLevel(level_image, exr_header, offset_data, channel_offset_list, pixel_data_size, head, size, err); if (ret != TINYEXR_SUCCESS) return ret; } } else { EXRImage* level_image = NULL; for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y) for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) { if (!level_image) { level_image = exr_image; } else { level_image->next_level = new EXRImage; InitEXRImage(level_image->next_level); level_image = level_image->next_level; } level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode); level_image->height = LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode); level_image->level_x = level_x; level_image->level_y = level_y; int ret = DecodeTiledLevel(level_image, exr_header, offset_data, channel_offset_list, pixel_data_size, head, size, err); if (ret != TINYEXR_SUCCESS) return ret; } } } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); const bool total_data_len_overflown = sizeof(void *) == 8 ? 
(total_data_len >= 0x4000000000) : false; if ((total_data_len == 0) || total_data_len_overflown) { if (err) { std::stringstream ss; ss << "Image data size is zero or too large: width = " << data_width << ", height = " << data_height << ", channels = " << num_channels << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> y_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_blocks)) { num_threads = int(num_blocks); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int y = 0; while ((y = y_count++) < int(num_blocks)) { #else #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { #endif size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(&line_no); tinyexr::swap4(&data_len); if (size_t(data_len) > data_size) { invalid_data = true; } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) { // Too large value. Assume this is invalid // 2**20 = 1048576 = heuristic value. invalid_data = true; } else if (data_len == 0) { // TODO(syoyo): May be ok to raise the threshold for example // `data_len < 4` invalid_data = true; } else { // line_no may be negative. 
int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window.max_y + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window.min_y); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window.min_y; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>( exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif } if (invalid_data) { if (err) { std::stringstream ss; (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. 
{ for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(&y); tinyexr::swap4(&data_len); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int FloorLog2(unsigned x) { // // For x > 0, floorLog2(y) returns floor(log(x)/log(2)). // int y = 0; while (x > 1) { y += 1; x >>= 1u; } return y; } static int CeilLog2(unsigned x) { // // For x > 0, ceilLog2(y) returns ceil(log(x)/log(2)). // int y = 0; int r = 0; while (x > 1) { if (x & 1) r = 1; y += 1; x >>= 1u; } return y + r; } static int RoundLog2(int x, int tile_rounding_mode) { return (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) ? 
FloorLog2(static_cast<unsigned>(x)) : CeilLog2(static_cast<unsigned>(x)); } static int CalculateNumXLevels(const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = exr_header->data_window.max_y; int num = 0; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: num = 1; break; case TINYEXR_TILE_MIPMAP_LEVELS: { int w = max_x - min_x + 1; int h = max_y - min_y + 1; num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { int w = max_x - min_x + 1; num = RoundLog2(w, exr_header->tile_rounding_mode) + 1; } break; default: assert(false); } return num; } static int CalculateNumYLevels(const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = exr_header->data_window.max_y; int num = 0; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: num = 1; break; case TINYEXR_TILE_MIPMAP_LEVELS: { int w = max_x - min_x + 1; int h = max_y - min_y + 1; num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { int h = max_y - min_y + 1; num = RoundLog2(h, exr_header->tile_rounding_mode) + 1; } break; default: assert(false); } return num; } static void CalculateNumTiles(std::vector<int>& numTiles, int toplevel_size, int size, int tile_rounding_mode) { for (unsigned i = 0; i < numTiles.size(); i++) { int l = LevelSize(toplevel_size, i, tile_rounding_mode); assert(l <= std::numeric_limits<int>::max() - size + 1); numTiles[i] = (l + size - 1) / size; } } static void PrecalculateTileInfo(std::vector<int>& num_x_tiles, std::vector<int>& num_y_tiles, const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = 
exr_header->data_window.max_y; int num_x_levels = CalculateNumXLevels(exr_header); int num_y_levels = CalculateNumYLevels(exr_header); num_x_tiles.resize(num_x_levels); num_y_tiles.resize(num_y_levels); CalculateNumTiles(num_x_tiles, max_x - min_x + 1, exr_header->tile_size_x, exr_header->tile_rounding_mode); CalculateNumTiles(num_y_tiles, max_y - min_y + 1, exr_header->tile_size_y, exr_header->tile_rounding_mode); } static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) { offset_data.offsets.resize(1); offset_data.offsets[0].resize(1); offset_data.offsets[0][0].resize(num_blocks); offset_data.num_x_levels = 1; offset_data.num_y_levels = 1; } // Return sum of tile blocks. static int InitTileOffsets(OffsetData& offset_data, const EXRHeader* exr_header, const std::vector<int>& num_x_tiles, const std::vector<int>& num_y_tiles) { int num_tile_blocks = 0; offset_data.num_x_levels = static_cast<int>(num_x_tiles.size()); offset_data.num_y_levels = static_cast<int>(num_y_tiles.size()); switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: case TINYEXR_TILE_MIPMAP_LEVELS: assert(offset_data.num_x_levels == offset_data.num_y_levels); offset_data.offsets.resize(offset_data.num_x_levels); for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { offset_data.offsets[l].resize(num_y_tiles[l]); for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { offset_data.offsets[l][dy].resize(num_x_tiles[l]); num_tile_blocks += num_x_tiles[l]; } } break; case TINYEXR_TILE_RIPMAP_LEVELS: offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels)); for (int ly = 0; ly < offset_data.num_y_levels; ++ly) { for (int lx = 0; lx < offset_data.num_x_levels; ++lx) { int l = ly * offset_data.num_x_levels + lx; offset_data.offsets[l].resize(num_y_tiles[ly]); for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) { offset_data.offsets[l][dy].resize(num_x_tiles[lx]); 
num_tile_blocks += num_x_tiles[lx]; } } } break; default: assert(false); } return num_tile_blocks; } static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) { for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0) return true; return false; } static bool isValidTile(const EXRHeader* exr_header, const OffsetData& offset_data, int dx, int dy, int lx, int ly) { if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false; int num_x_levels = offset_data.num_x_levels; int num_y_levels = offset_data.num_y_levels; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: if (lx == 0 && ly == 0 && offset_data.offsets.size() > 0 && offset_data.offsets[0].size() > static_cast<size_t>(dy) && offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) { return true; } break; case TINYEXR_TILE_MIPMAP_LEVELS: if (lx < num_x_levels && ly < num_y_levels && offset_data.offsets.size() > static_cast<size_t>(lx) && offset_data.offsets[lx].size() > static_cast<size_t>(dy) && offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) { return true; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels); if (lx < num_x_levels && ly < num_y_levels && (offset_data.offsets.size() > idx) && offset_data.offsets[idx].size() > static_cast<size_t>(dy) && offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) { return true; } } break; default: return false; } return false; } static void ReconstructTileOffsets(OffsetData& offset_data, const EXRHeader* exr_header, const unsigned char* head, const unsigned char* marker, const size_t /*size*/, bool isMultiPartFile, bool isDeep) { int numXLevels = offset_data.num_x_levels; for (unsigned int l = 0; l < 
offset_data.offsets.size(); ++l) { for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 tileOffset = marker - head; if (isMultiPartFile) { //int partNumber; marker += sizeof(int); } int tileX; memcpy(&tileX, marker, sizeof(int)); tinyexr::swap4(&tileX); marker += sizeof(int); int tileY; memcpy(&tileY, marker, sizeof(int)); tinyexr::swap4(&tileY); marker += sizeof(int); int levelX; memcpy(&levelX, marker, sizeof(int)); tinyexr::swap4(&levelX); marker += sizeof(int); int levelY; memcpy(&levelY, marker, sizeof(int)); tinyexr::swap4(&levelY); marker += sizeof(int); if (isDeep) { tinyexr::tinyexr_int64 packed_offset_table_size; memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size)); marker += sizeof(tinyexr::tinyexr_int64); tinyexr::tinyexr_int64 packed_sample_size; memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size)); marker += sizeof(tinyexr::tinyexr_int64); // next Int64 is unpacked sample size - skip that too marker += packed_offset_table_size + packed_sample_size + 8; } else { int dataSize; memcpy(&dataSize, marker, sizeof(int)); tinyexr::swap4(&dataSize); marker += sizeof(int); marker += dataSize; } if (!isValidTile(exr_header, offset_data, tileX, tileY, levelX, levelY)) return; int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels); offset_data.offsets[level_idx][tileY][tileX] = tileOffset; } } } } // marker output is also static int ReadOffsets(OffsetData& offset_data, const unsigned char* head, const unsigned char*& marker, const size_t size, const char** err) { for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { for (unsigned int dx = 0; dx 
< offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 offset; if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offset_data.offsets[l][dy][dx] = offset; } } } return TINYEXR_SUCCESS; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } if (exr_header->data_window.max_x < exr_header->data_window.min_x || exr_header->data_window.max_x - exr_header->data_window.min_x == std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data width value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_width = exr_header->data_window.max_x - exr_header->data_window.min_x + 1; if (exr_header->data_window.max_y < exr_header->data_window.min_y || exr_header->data_window.max_y - exr_header->data_window.min_y == std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } int data_height = exr_header->data_window.max_y - 
exr_header->data_window.min_y + 1; // Do not allow too large data_width and data_height. header invalid? { if (data_width > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("data width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (data_height > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("data height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } if (exr_header->tiled) { if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("tile width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) { tinyexr::SetErrorMessage("tile height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } // Read offset tables. OffsetData offset_data; size_t num_blocks = 0; // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header. // If chunk_count > 0 then chunk_count must be equal to the calculated tile count. if (exr_header->tiled) { { std::vector<int> num_x_tiles, num_y_tiles; PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header); num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles); if (exr_header->chunk_count > 0) { if (exr_header->chunk_count != static_cast<int>(num_blocks)) { tinyexr::SetErrorMessage("Invalid offset table size.", err); return TINYEXR_ERROR_INVALID_DATA; } } } int ret = ReadOffsets(offset_data, head, marker, size, err); if (ret != TINYEXR_SUCCESS) return ret; if (IsAnyOffsetsAreInvalid(offset_data)) { ReconstructTileOffsets(offset_data, exr_header, head, marker, size, exr_header->multipart, exr_header->non_image); } } else if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. 
num_blocks = static_cast<size_t>(exr_header->chunk_count); InitSingleResolutionOffsets(offset_data, num_blocks); } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } InitSingleResolutionOffsets(offset_data, num_blocks); } if (!exr_header->tiled) { std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." 
<< std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } #if 1 FreeEXRImage(exr_image); #else // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } #endif } return ret; } } static void GetLayers(const EXRHeader &exr_header, std::vector<std::string> &layer_names) { // Naive implementation // Group channels by layers // go over all channel names, split by periods // collect unique names layer_names.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string full_name(exr_header.channels[c].name); const size_t pos = full_name.find_last_of('.'); if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) { full_name.erase(pos); if (std::find(layer_names.begin(), layer_names.end(), full_name) == layer_names.end()) layer_names.push_back(full_name); } } } struct LayerChannel { explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {} size_t index; std::string name; }; static void ChannelsInLayer(const EXRHeader &exr_header, const std::string layer_name, std::vector<LayerChannel> &channels) { channels.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string ch_name(exr_header.channels[c].name); if (layer_name.empty()) { const size_t pos = ch_name.find_last_of('.'); if (pos != std::string::npos && pos < ch_name.size()) { ch_name = ch_name.substr(pos + 1); } } else { const size_t 
pos = ch_name.find(layer_name + '.'); if (pos == std::string::npos) continue; if (pos == 0) { ch_name = ch_name.substr(layer_name.size() + 1); } } LayerChannel ch(size_t(c), ch_name); channels.push_back(ch); } } } // namespace tinyexr int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) { EXRVersion exr_version; EXRHeader exr_header; InitEXRHeader(&exr_header); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } std::vector<std::string> layer_vec; tinyexr::GetLayers(exr_header, layer_vec); (*num_layers) = int(layer_vec.size()); (*layer_names) = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size()))); for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) { #ifdef _MSC_VER (*layer_names)[c] = _strdup(layer_vec[c].c_str()); #else (*layer_names)[c] = strdup(layer_vec[c].c_str()); #endif } FreeEXRHeader(&exr_header); return TINYEXR_SUCCESS; } int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */ NULL, err); } int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = 
ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } // TODO: Probably limit loading to layers (channels) selected by layer index { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; std::vector<std::string> layer_names; tinyexr::GetLayers(exr_header, layer_names); std::vector<tinyexr::LayerChannel> channels; tinyexr::ChannelsInLayer( exr_header, layername == NULL ? "" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only. 
(*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * static_cast<int>(exr_header.tile_size_x) + i; const int jj = exr_image.tiles[it].offset_y * static_cast<int>(exr_header.tile_size_y) + j; const int idx = ii + jj * static_cast<int>(exr_image.width); // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * 
static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int IsEXR(const char *filename) { EXRVersion exr_version; int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. 
`memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); exr_header->multipart = version->multipart ? 1 : 0; exr_header->non_image = version->non_image ? 1 : 0; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to parse EXR version. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. 
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); // TODO(syoyo): return wfopen_s erro code return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t 
filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. 
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size,
                               err);
}

namespace tinyexr {

// Packs one block (scanline group or tile) of planar channel data into the
// EXR on-disk layout (per-scanline, channel-interleaved, little-endian),
// converting each channel from its in-memory pixel_type to its
// requested_pixel_type, then appends the (optionally compressed) result to
// out_data. Returns false on unsupported compression type.
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
                            const unsigned char* const* images,
                            int compression_type,
                            int /*line_order*/,
                            int width,      // for tiled : tile.width
                            int /*height*/,  // for tiled : header.tile_size_y
                            int x_stride,   // for tiled : header.tile_size_x
                            int line_no,    // for tiled : 0
                            int num_lines,  // for tiled : tile.height
                            size_t pixel_data_size,
                            const std::vector<ChannelInfo>& channels,
                            const std::vector<size_t>& channel_offset_list,
                            const void* compression_param = 0)  // zfp compression param
{
  // Uncompressed staging buffer for the whole block.
  size_t buf_size = static_cast<size_t>(width) *
                    static_cast<size_t>(num_lines) *
                    static_cast<size_t>(pixel_data_size);
  //int last2bit = (buf_size & 3);
  // buf_size must be multiple of four
  //if(last2bit) buf_size += 4 - last2bit;
  std::vector<unsigned char> buf(buf_size);

  size_t start_y = static_cast<size_t>(line_no);
  for (size_t c = 0; c < channels.size(); c++) {
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF in memory -> FLOAT on disk.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP16 h16;
            h16.u = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::FP32 f32 = half_to_float(h16);

            tinyexr::swap4(&f32.f);

            // line_ptr[x] = f32.f;
            tinyexr::cpy4(line_ptr + x, &(f32.f));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // HALF in memory -> HALF on disk (byte swap only).
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y * width) +
                      channel_offset_list[c] * static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            unsigned short val =
                reinterpret_cast<const unsigned short * const *>(
                    images)[c][(y + start_y) * x_stride + x];

            tinyexr::swap2(&val);

            // line_ptr[x] = val;
            tinyexr::cpy2(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        // FLOAT in memory -> HALF on disk.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y * width) +
                      channel_offset_list[c] * static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP32 f32;
            f32.f = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::FP16 h16;
            h16 = float_to_half_full(f32);

            tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));

            // line_ptr[x] = h16.u;
            tinyexr::cpy2(line_ptr + x, &(h16.u));
          }
        }
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
        // FLOAT in memory -> FLOAT on disk (byte swap only).
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] * static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            float val = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];

            tinyexr::swap4(&val);

            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      } else {
        assert(0);
      }
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // UINT in memory -> UINT on disk (byte swap only).
      for (int y = 0; y < num_lines; y++) {
        // Assume increasing Y
        unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
            static_cast<size_t>(pixel_data_size * y * width) +
            channel_offset_list[c] * static_cast<size_t>(width)));
        for (int x = 0; x < width; x++) {
          unsigned int val = reinterpret_cast<const unsigned int * const *>(
              images)[c][(y + start_y) * x_stride + x];

          tinyexr::swap4(&val);

          // line_ptr[x] = val;
          tinyexr::cpy4(line_ptr + x, &val);
        }
      }
    }
  }

  // Compress (or pass through) the staged block and append to out_data.
  if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(uncompressed)
    out_data.insert(out_data.end(), buf.begin(), buf.end());

  } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
             (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
    std::vector<unsigned char> block(mz_compressBound(
        static_cast<unsigned long>(buf.size())));
#else
    std::vector<unsigned char> block(
        compressBound(static_cast<uLong>(buf.size())));
#endif
    tinyexr::tinyexr_uint64 outSize = block.size();

    tinyexr::CompressZip(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);

  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // (buf.size() * 3) / 2 would be enough.
    std::vector<unsigned char> block((buf.size() * 3) / 2);

    tinyexr::tinyexr_uint64 outSize = block.size();

    tinyexr::CompressRle(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);

  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
    unsigned int bufLen =
        8192 + static_cast<unsigned int>(
                   2 * static_cast<unsigned int>(
                           buf.size()));  // @fixme { compute good bound.
                                          // }
    std::vector<unsigned char> block(bufLen);
    unsigned int outSize = static_cast<unsigned int>(block.size());

    CompressPiz(&block.at(0), &outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size(), channels, width, num_lines);

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);

#else
    assert(0);
#endif
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    const ZFPCompressionParam* zfp_compression_param =
        reinterpret_cast<const ZFPCompressionParam*>(compression_param);
    std::vector<unsigned char> block;
    unsigned int outSize;

    tinyexr::CompressZfp(
        &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
        width, num_lines, static_cast<int>(channels.size()),
        *zfp_compression_param);

    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;

    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);

#else
    (void)compression_param;
    assert(0);
#endif
  } else {
    assert(0);
    return false;
  }

  return true;
}

// Encodes all tiles of one mip/rip level, writing one data block per tile
// into data_list[start_index ...]. Tiles are processed in parallel when C++11
// threads or OpenMP are available.
static int EncodeTiledLevel(const EXRImage* level_image,
                            const EXRHeader* exr_header,
                            const std::vector<tinyexr::ChannelInfo>& channels,
                            std::vector<std::vector<unsigned char> >& data_list,
                            size_t start_index,  // for data_list
                            int num_x_tiles, int num_y_tiles,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const void* compression_param,  // must be set if zfp compression is enabled
                            std::string* err) {
  int num_tiles = num_x_tiles * num_y_tiles;
  assert(num_tiles == level_image->num_tiles);

  if ((exr_header->tile_size_x > level_image->width ||
       exr_header->tile_size_y > level_image->height) &&
      level_image->level_x == 0 && level_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Worker pool: each thread pulls the next tile index from an atomic counter.
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);

  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }

  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int i = 0;
      while ((i = tile_count++) < num_tiles) {
#else
  // Use signed int since some OpenMP compiler doesn't allow unsigned type for
  // `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_tiles; i++) {
#endif
    size_t tile_idx = static_cast<size_t>(i);
    size_t data_idx = tile_idx + start_index;

    // Tile coordinates within the level, row-major.
    int x_tile = i % num_x_tiles;
    int y_tile = i / num_x_tiles;

    EXRTile& tile = level_image->tiles[tile_idx];
    const unsigned char* const* images =
        static_cast<const unsigned char* const*>(tile.images);

    // Reserve the 5-int tile block header, then append pixel data after it.
    data_list[data_idx].resize(5*sizeof(int));
    size_t data_header_size = data_list[data_idx].size();
    bool ret = EncodePixelData(data_list[data_idx],
                               images,
                               exr_header->compression_type,
                               0,  // increasing y
                               tile.width,
                               exr_header->tile_size_y,
                               exr_header->tile_size_x,
                               0,
                               tile.height,
                               pixel_data_size,
                               channels,
                               channel_offset_list,
                               compression_param);
    if (!ret) {
      invalid_data = true;
      continue;
    }
    assert(data_list[data_idx].size() > data_header_size);
    int data_len =
        static_cast<int>(data_list[data_idx].size() - data_header_size);
    //tileX, tileY, levelX, levelY
    // pixel_data_size(int)
    memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
    memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
    memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
    memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
    memcpy(&data_list[data_idx][16], &data_len, sizeof(int));

    // Header fields are stored little-endian.
    swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }

  for (auto &t : workers) {
    t.join();
  }
#else
  }  // omp parallel
#endif

  if (invalid_data) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  return TINYEXR_SUCCESS;
}

// Returns how many scanlines one compressed block holds for the given
// compression type (1 for NONE/RLE/ZIPS, 16 for ZIP/ZFP, 32 for PIZ).
static int NumScanlines(int compression_type) {
  int num_scanlines = 1;
  if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanlines = 16;
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanlines = 32;
  } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanlines = 16;
  }
  return num_scanlines;
}

// Encodes all blocks (tiles or scanline groups) of one image part, filling
// data_list and the part's chunk-offset table. `total_size` receives the file
// offset just past this part's last chunk.
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
                       const std::vector<ChannelInfo>& channels,
                       int num_blocks,
                       tinyexr_uint64 chunk_offset,  // starting offset of current chunk
                       bool is_multipart,
                       OffsetData& offset_data,  // output block offsets, must be initialized
                       std::vector<std::vector<unsigned char> >& data_list,  // output
                       tinyexr_uint64& total_size,  // output: ending offset of current chunk
                       std::string* err) {
  int num_scanlines = NumScanlines(exr_header->compression_type);

  data_list.resize(num_blocks);

  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));

  // Per-channel byte offsets within one scanline, and the total bytes per
  // pixel across all channels (at the requested on-disk pixel types).
  int pixel_data_size = 0;
  {
    size_t channel_offset = 0;
    for (size_t c = 0;
         c < static_cast<size_t>(exr_header->num_channels); c++) {
      channel_offset_list[c] = channel_offset;
      if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) {
        pixel_data_size += sizeof(unsigned short);
        channel_offset += sizeof(unsigned short);
      } else if (channels[c].requested_pixel_type ==
                 TINYEXR_PIXELTYPE_FLOAT) {
        pixel_data_size += sizeof(float);
        channel_offset += sizeof(float);
      } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) {
        pixel_data_size += sizeof(unsigned int);
        channel_offset += sizeof(unsigned int);
      } else {
        assert(0);
      }
    }
  }

  const void*
compression_param = 0;
#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;

  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    std::string e;
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes, &e);

    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
    compression_param = &zfp_compression_param;
  }
#endif

  tinyexr_uint64 offset = chunk_offset;
  // Multipart chunks are prefixed with a 4-byte part number.
  tinyexr_uint64 doffset = is_multipart ? 4u : 0u;

  if (exr_image->tiles) {
    // Tiled path: encode each mip/rip level in order.
    const EXRImage* level_image = exr_image;
    size_t block_idx = 0;
    tinyexr::tinyexr_uint64 block_data_size = 0;
    int num_levels =
        (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS)
            ? offset_data.num_x_levels
            : (offset_data.num_x_levels * offset_data.num_y_levels);
    for (int level_index = 0; level_index < num_levels; ++level_index) {
      if (!level_image) {
        if (err) {
          (*err) += "Invalid number of tiled levels for EncodeChunk\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      int level_index_from_image = LevelIndex(
          level_image->level_x, level_image->level_y,
          exr_header->tile_level_mode, offset_data.num_x_levels);
      if (level_index_from_image != level_index) {
        if (err) {
          (*err) += "Incorrect level ordering in tiled image\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      int num_y_tiles = (int)offset_data.offsets[level_index].size();
      assert(num_y_tiles);
      int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
      assert(num_x_tiles);

      std::string e;
      int ret = EncodeTiledLevel(level_image,
                                 exr_header,
                                 channels,
                                 data_list,
                                 block_idx,
                                 num_x_tiles,
                                 num_y_tiles,
                                 channel_offset_list,
                                 pixel_data_size,
                                 compression_param,
                                 &e);
      if (ret != TINYEXR_SUCCESS) {
        if (!e.empty() && err) {
          (*err) += e;
        }
        return ret;
      }

      // Record the (little-endian) file offset of every tile of this level.
      for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
        for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
          offset_data.offsets[level_index][j][i] =
              offset;
          swap8(reinterpret_cast<tinyexr_uint64*>(
              &offset_data.offsets[level_index][j][i]));
          offset += data_list[block_idx].size() + doffset;
          block_data_size += data_list[block_idx].size();
          ++block_idx;
        }
      level_image = level_image->next_level;
    }
    assert(static_cast<int>(block_idx) == num_blocks);
    total_size = offset;
  } else {  // scanlines
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    // Worker pool: each thread pulls the next block index from an atomic
    // counter.
    std::atomic<bool> invalid_data(false);
    std::vector<std::thread> workers;
    std::atomic<int> block_count(0);

    int num_threads = std::min(
        std::max(1, int(std::thread::hardware_concurrency())), num_blocks);

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int i = 0;
        while ((i = block_count++) < num_blocks) {
#else
    bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < num_blocks; i++) {
#endif
      // Last block may hold fewer than num_scanlines lines.
      int start_y = num_scanlines * i;
      int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
      int num_lines = end_Y - start_y;

      const unsigned char* const* images =
          static_cast<const unsigned char* const*>(exr_image->images);

      // Reserve the 2-int scanline block header (y coordinate + data size).
      data_list[i].resize(2*sizeof(int));
      size_t data_header_size = data_list[i].size();
      bool ret = EncodePixelData(data_list[i],
                                 images,
                                 exr_header->compression_type,
                                 0,  // increasing y
                                 exr_image->width,
                                 exr_image->height,
                                 exr_image->width,
                                 start_y,
                                 num_lines,
                                 pixel_data_size,
                                 channels,
                                 channel_offset_list,
                                 compression_param);
      if (!ret) {
        invalid_data = true;
        continue;  // "break" cannot be used with OpenMP
      }
      assert(data_list[i].size() > data_header_size);
      int data_len = static_cast<int>(data_list[i].size() - data_header_size);
      memcpy(&data_list[i][0], &start_y, sizeof(int));
      memcpy(&data_list[i][4], &data_len, sizeof(int));

      swap4(reinterpret_cast<int*>(&data_list[i][0]));
      swap4(reinterpret_cast<int*>(&data_list[i][4]));

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }

    for (auto &t : workers) {
      t.join();
    }
#else
    }  //
omp parallel #endif if (invalid_data) { if (err) { (*err) += "Failed to encode scanline data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size() + doffset; } total_size = static_cast<size_t>(offset); } return TINYEXR_SUCCESS; } // can save a single or multi-part image (no deep* formats) static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, unsigned char** memory_out, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory_out == NULL) { SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } { for (unsigned int i = 0; i < num_parts; ++i) { if (exr_headers[i]->compression_type < 0) { SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #else for (int c = 0; c < exr_header->num_channels; ++c) { if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) { SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif } } std::vector<unsigned char> memory; // Header { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; memory.insert(memory.end(), header, header + 4); } // Version // using value from the first header int long_name = exr_headers[0]->long_name; { char marker[] = { 2, 0, 0, 0 }; /* @todo if (exr_header->non_image) { marker[1] |= 0x8; } */ // tiled if (num_parts == 1 && exr_images[0].tiles) { marker[1] |= 0x2; } // long_name 
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Pre-compute the chunk (block) count and offset tables for each part.
  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      // Round up so a partial final block is counted.
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      {
        std::vector<int> num_x_tiles, num_y_tiles;
        PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
        chunk_count[i] =
            InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles,
                            num_y_tiles);
        total_chunk_count += chunk_count[i];
      }
    }
  }
  // Write attributes to memory buffer.
  std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  {
    std::set<std::string> partnames;
    for (unsigned int i = 0; i < num_parts; ++i) {
      //channels
      {
        std::vector<unsigned char> data;
        for (int c = 0; c < exr_headers[i]->num_channels; c++) {
          tinyexr::ChannelInfo info;
          info.p_linear = 0;
          info.pixel_type = exr_headers[i]->pixel_types[c];
          info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c];
          info.x_sampling = 1;
          info.y_sampling = 1;
          info.name = std::string(exr_headers[i]->channels[c].name);
          channels[i].push_back(info);
        }
        tinyexr::WriteChannelInfo(data, channels[i]);
        tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist",
                                        &data.at(0),
                                        static_cast<int>(data.size()));
      }
      {
        int comp = exr_headers[i]->compression_type;
        swap4(&comp);
        WriteAttributeToMemory(
            &memory, "compression", "compression",
            reinterpret_cast<const unsigned char*>(&comp), 1);
      }
      {
        // dataWindow: xmin, ymin, xmax, ymax (inclusive), little-endian.
        int data[4] = { 0, 0, exr_images[i].width - 1,
                        exr_images[i].height - 1 };
        swap4(&data[0]);
        swap4(&data[1]);
        swap4(&data[2]);
        swap4(&data[3]);
        WriteAttributeToMemory(
            &memory, "dataWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);

        int data0[4] = { 0, 0, exr_images[0].width - 1,
                         exr_images[0].height - 1 };
        swap4(&data0[0]);
        swap4(&data0[1]);
        swap4(&data0[2]);
        swap4(&data0[3]);
        // Note: must be the same across parts (currently, using value from
        // the first header)
        WriteAttributeToMemory(
            &memory, "displayWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
      }
      {
        unsigned char line_order = 0;
        // @fixme { read line_order from EXRHeader }
        WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                               &line_order, 1);
      }
      {
        // Note: must be the same across parts
        float aspectRatio = 1.0f;
        swap4(&aspectRatio);
        WriteAttributeToMemory(
            &memory, "pixelAspectRatio", "float",
            reinterpret_cast<const unsigned char*>(&aspectRatio),
            sizeof(float));
      }
      {
        float center[2] = { 0.0f, 0.0f };
        swap4(&center[0]);
        swap4(&center[1]);
        WriteAttributeToMemory(
            &memory, "screenWindowCenter", "v2f",
            reinterpret_cast<const unsigned char*>(center),
            2 * sizeof(float));
      }
      {
        float w = 1.0f;
        swap4(&w);
        WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                               reinterpret_cast<const unsigned char*>(&w),
                               sizeof(float));
      }
      if (exr_images[i].tiles) {
        // tiledesc: tile_size_x(int), tile_size_y(int), mode(1 byte).
        unsigned char tile_mode =
            static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
        if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
        //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
        unsigned int datai[3] = { 0, 0, 0 };
        unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
        datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
        datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
        data[8] = tile_mode;
        swap4(reinterpret_cast<unsigned int*>(&data[0]));
        swap4(reinterpret_cast<unsigned int*>(&data[4]));
        WriteAttributeToMemory(
            &memory, "tiles", "tiledesc",
            reinterpret_cast<const unsigned char*>(data), 9);
      }
      // must be present for multi-part files - according to spec.
      if (num_parts > 1) {
        // name
        {
          size_t len = 0;
          if ((len = strlen(exr_headers[i]->name)) > 0) {
            // Part names must be unique; the set detects duplicates.
            partnames.insert(std::string(exr_headers[i]->name));
            if (partnames.size() != i + 1) {
              SetErrorMessage(
                  "'name' attributes must be unique for a multi-part file",
                  err);
              return 0;
            }
            WriteAttributeToMemory(
                &memory, "name", "string",
                reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
                static_cast<int>(len));
          } else {
            SetErrorMessage("Invalid 'name' attribute for a multi-part file",
                            err);
            return 0;
          }
        }
        // type
        {
          const char* type = "scanlineimage";
          if (exr_images[i].tiles) type = "tiledimage";
          WriteAttributeToMemory(
              &memory, "type", "string",
              reinterpret_cast<const unsigned char*>(type),
              static_cast<int>(strlen(type)));
        }
        // chunkCount
        {
          WriteAttributeToMemory(
              &memory, "chunkCount", "int",
              reinterpret_cast<const unsigned char*>(&chunk_count[i]), 4);
        }
      }

      // Custom attributes
      if (exr_headers[i]->num_custom_attributes > 0) {
        for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
          tinyexr::WriteAttributeToMemory(
              &memory, exr_headers[i]->custom_attributes[j].name,
              exr_headers[i]->custom_attributes[j].type,
              reinterpret_cast<const unsigned char*>(
                  exr_headers[i]->custom_attributes[j].value),
              exr_headers[i]->custom_attributes[j].size);
        }
      }

      {  // end of header
        memory.push_back(0);
      }
    }
  }
  if (num_parts > 1) {
    // end of header list
    memory.push_back(0);
  }

  // First chunk starts right after the header and all offset tables.
  tinyexr_uint64 chunk_offset =
      memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);

  tinyexr_uint64 total_size = 0;
  std::vector< std::vector< std::vector<unsigned char> > >
      data_lists(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    std::string e;
    int ret = EncodeChunk(&exr_images[i], exr_headers[i],
                          channels[i],
                          chunk_count[i],
                          // starting offset of current chunk after part-number
                          chunk_offset,
                          num_parts > 1,
                          offset_data[i],  // output: block offsets, must be initialized
                          data_lists[i],  // output
                          total_size,  // output
                          &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e,
err); } return 0; } chunk_offset = total_size; } // Allocating required memory if (total_size == 0) { // something went wrong tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char*>(malloc(total_size)); // Writing header memcpy((*memory_out), &memory[0], memory.size()); unsigned char* memory_ptr = *memory_out + memory.size(); size_t sum = memory.size(); // Writing offset data for chunks for (unsigned int i = 0; i < num_parts; ++i) { if (exr_images[i].tiles) { const EXRImage* level_image = &exr_images[i]; int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ? offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels); for (int level_index = 0; level_index < num_levels; ++level_index) { for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) { size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size(); sum += num_bytes; assert(sum <= total_size); memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]), num_bytes); memory_ptr += num_bytes; } level_image = level_image->next_level; } } else { size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]); sum += num_bytes; assert(sum <= total_size); std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0]; memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes); memory_ptr += num_bytes; } } // Writing chunk data for (unsigned int i = 0; i < num_parts; ++i) { for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) { if (num_parts > 1) { sum += 4; assert(sum <= total_size); unsigned int part_number = i; swap4(&part_number); memcpy(memory_ptr, &part_number, 4); memory_ptr += 4; } sum += data_lists[i][j].size(); assert(sum <= total_size); memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size()); memory_ptr += data_lists[i][j].size(); } } 
assert(sum == total_size); return total_size; // OK } } // tinyexr size_t SaveEXRImageToMemory(const EXRImage* exr_image, const EXRHeader* exr_header, unsigned char** memory_out, const char** err) { return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1, memory_out, err); } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return 
TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, unsigned char** memory_out, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts < 2 || memory_out == NULL) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err); } int SaveEXRMultipartImageToFile(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, const char* filename, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts < 2) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { 
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. 
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(&dx); tinyexr::swap4(&dy); tinyexr::swap4(&dw); tinyexr::swap4(&dh); } else if (attr_name.compare("displayWindow") == 
0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(&x); tinyexr::swap4(&y); tinyexr::swap4(&w); tinyexr::swap4(&h); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } 
deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(&line_no); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { 
tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->next_level = NULL; exr_image->level_x = 0; exr_image->level_y = 0; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char *msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { 
free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } EXRSetNameAttr(exr_header, NULL); return TINYEXR_SUCCESS; } void EXRSetNameAttr(EXRHeader* exr_header, const char* name) { if (exr_header == NULL) { return; } memset(exr_header->name, 0, 256); if (name != NULL) { size_t len = std::min(strlen(name), (size_t)255); if (len) { memcpy(exr_header->name, name, len); } } } int EXRNumLevels(const EXRImage* exr_image) { if (exr_image == NULL) return 0; if(exr_image->images) return 1; // scanlines int levels = 1; const EXRImage* level_image = exr_image; while((level_image = level_image->next_level)) ++levels; return levels; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_image->next_level) { FreeEXRImage(exr_image->next_level); delete exr_image->next_level; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) 
// MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' 
break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); memset(exr_header, 0, sizeof(EXRHeader)); ConvertHeader(exr_header, infos[i]); exr_header->multipart = exr_version->multipart ? 1 : 0; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); 
assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. { // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (err != 0) { // TODO(syoyo): return wfopen_s erro code return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); 
fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. 
std::vector<tinyexr::OffsetData> chunk_offset_table_list; chunk_offset_table_list.reserve(num_parts); for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1); tinyexr::OffsetData& offset_data = chunk_offset_table_list.back(); if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) { tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count); std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0]; for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } } else { { std::vector<int> num_x_tiles, num_y_tiles; tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]); int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles); if (num_blocks != exr_headers[i]->chunk_count) { tinyexr::SetErrorMessage("Invalid offset table size.", err); return TINYEXR_ERROR_INVALID_DATA; } } for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number' marker += sizeof(tinyexr::tinyexr_uint64); // = 8 } } } } } // Decode image. 
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { tinyexr::OffsetData &offset_data = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { const unsigned char *part_number_addr = memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field. unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, 
SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEFINED #endif // TINYEXR_IMPLEMENTATION
GB_unop__abs_fp32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__abs_fp32_fc32)
// op(A') function: GB (_unop_tran__abs_fp32_fc32)

// C type: float
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = cabsf (aij)

// type of the A matrix entries (single-precision complex)
#define GB_ATYPE \
    GxB_FC32_t

// type of the C matrix entries (the real magnitude of each A entry)
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex absolute value (magnitude), from <complex.h>
#define GB_OP(z, x) \
    z = cabsf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = (aij) ; \
    Cx [pC] = cabsf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Compute Cx [p] = cabsf (Ax [p]) for all anz entries, in parallel.
// Works element-wise, so Cx and Ax may be the same array (aliased).
GrB_Info GB (_unop_apply__abs_fp32_fc32)
(
    float *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = cabsf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = cabsf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all unary ops: it is textually
// included from GB_unop_transpose.c and specialized via the GB_* macros above.
GrB_Info GB (_unop_tran__abs_fp32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
simple_env.c
// RUN: %libomp-compile // RUN: env OMP_DISPLAY_AFFINITY=true OMP_AFFINITY_FORMAT='TESTER-ENV: tl:%L tn:%n nt:%N' OMP_NUM_THREADS=8 %libomp-run | %python %S/check.py -c 'CHECK-8' %s // REQUIRES: !abt #include <stdio.h> #include <stdlib.h> #include <omp.h> int main(int argc, char** argv) { #pragma omp parallel { } #pragma omp parallel { } return 0; } // CHECK-8: num_threads=8 TESTER-ENV: tl:1 tn:[0-7] nt:8$
GB_unop__minv_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__minv_uint8_uint8
// op(A') function:  GB_unop_tran__minv_uint8_uint8

// C type:   uint8_t
// A type:   uint8_t
// cast:     uint8_t cij = aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 8)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: integer multiplicative inverse for 8-bit unsigned values
// (semantics defined by the GB_IMINV_UNSIGNED macro in GB.h)
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 8) ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = aij ; \
    Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = GB_IMINV_UNSIGNED (Ax [p], 8) for p in [0, anz), in
// parallel.  When Ab is non-NULL, A is a bitmap and absent entries
// (Ab [p] == 0) are skipped.  The identity-op memcpy fast path is compiled
// out here since GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0 for MINV.
GrB_Info GB_unop_apply__minv_uint8_uint8
(
    uint8_t *Cx,                     // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *GB_RESTRICT Ab,    // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose logic lives in the textual include GB_unop_transpose.c,
// specialized by the GB_* macros above.
GrB_Info GB_unop_tran__minv_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
coordinate_common.h
/*!
 * Copyright 2018 by Contributors
 * \author Rory Mitchell
 */
#pragma once
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <limits>
#include "xgboost/data.h"
#include "xgboost/parameter.h"
#include "./param.h"
#include "../gbm/gblinear_model.h"
#include "../common/random.h"

namespace xgboost {
namespace linear {

// Parameters shared by coordinate-descent linear updaters.
struct CoordinateParam : public XGBoostParameter<CoordinateParam> {
  // Number of top features considered by the 'thrifty' selector; 0 = all.
  int top_k;
  DMLC_DECLARE_PARAMETER(CoordinateParam) {
    DMLC_DECLARE_FIELD(top_k)
        .set_lower_bound(0)
        .set_default(0)
        .describe("The number of top features to select in 'thrifty' feature_selector. "
                  "The value of zero means using all the features.");
  }
};

/**
 * \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the
 *        number of training instances.
 *
 * \param sum_grad   The sum gradient.
 * \param sum_hess   The sum hess.
 * \param w          The weight.
 * \param reg_alpha  Unnormalised L1 penalty.
 * \param reg_lambda Unnormalised L2 penalty.
 *
 * \return The weight update.
 */
inline double CoordinateDelta(double sum_grad, double sum_hess, double w,
                              double reg_alpha, double reg_lambda) {
  // A near-zero Hessian gives no curvature information — skip the update.
  if (sum_hess < 1e-5f) return 0.0f;
  const double sum_grad_l2 = sum_grad + reg_lambda * w;
  const double sum_hess_l2 = sum_hess + reg_lambda;
  const double tmp = w - sum_grad_l2 / sum_hess_l2;
  // L1 handling: shift the gradient by +/- alpha depending on which side of
  // zero the tentative weight lands on, and clamp the step at -w so the
  // update can zero the weight but never flip its sign past zero.
  if (tmp >= 0) {
    return std::max(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w);
  } else {
    return std::min(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w);
  }
}

/**
 * \brief Calculate update to bias.
 *
 * \param sum_grad The sum gradient.
 * \param sum_hess The sum hess.
 *
 * \return The weight update.
 */
inline double CoordinateDeltaBias(double sum_grad, double sum_hess) {
  // Plain Newton step; no regularisation is applied to the bias.
  return -sum_grad / sum_hess;
}

/**
 * \brief Get the gradient with respect to a single feature.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param fidx      The target feature.
 * \param gpair     Gradients.
 * \param p_fmat    The feature matrix.
 *
 * \return The gradient and diagonal Hessian entry for a given feature.
 */
inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx,
                                             const std::vector<GradientPair> &gpair,
                                             DMatrix *p_fmat) {
  double sum_grad = 0.0, sum_hess = 0.0;
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto col = batch[fidx];
    const auto ndata = static_cast<bst_omp_uint>(col.size());
    for (bst_omp_uint j = 0; j < ndata; ++j) {
      const bst_float v = col[j].fvalue;
      auto &p = gpair[col[j].index * num_group + group_idx];
      // A negative Hessian marks an instance to be ignored (see the other
      // helpers in this header, which use the same convention).
      if (p.GetHess() < 0.0f) continue;
      sum_grad += p.GetGrad() * v;
      sum_hess += p.GetHess() * v * v;
    }
  }
  return std::make_pair(sum_grad, sum_hess);
}

/**
 * \brief Get the gradient with respect to a single feature. Row-wise multithreaded.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param fidx      The target feature.
 * \param gpair     Gradients.
 * \param p_fmat    The feature matrix.
 *
 * \return The gradient and diagonal Hessian entry for a given feature.
 */
inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group,
                                                     int fidx,
                                                     const std::vector<GradientPair> &gpair,
                                                     DMatrix *p_fmat) {
  double sum_grad = 0.0, sum_hess = 0.0;
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto col = batch[fidx];
    const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
    for (bst_omp_uint j = 0; j < ndata; ++j) {
      const bst_float v = col[j].fvalue;
      auto &p = gpair[col[j].index * num_group + group_idx];
      if (p.GetHess() < 0.0f) continue;
      sum_grad += p.GetGrad() * v;
      sum_hess += p.GetHess() * v * v;
    }
  }
  return std::make_pair(sum_grad, sum_hess);
}

/**
 * \brief Get the gradient with respect to the bias. Row-wise multithreaded.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param gpair     Gradients.
 * \param p_fmat    The feature matrix.
 *
 * \return The gradient and diagonal Hessian entry for the bias.
 */
inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group,
                                                         const std::vector<GradientPair> &gpair,
                                                         DMatrix *p_fmat) {
  double sum_grad = 0.0, sum_hess = 0.0;
  const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
  for (bst_omp_uint i = 0; i < ndata; ++i) {
    auto &p = gpair[i * num_group + group_idx];
    if (p.GetHess() >= 0.0f) {
      sum_grad += p.GetGrad();
      sum_hess += p.GetHess();
    }
  }
  return std::make_pair(sum_grad, sum_hess);
}

/**
 * \brief Updates the gradient vector with respect to a change in weight.
 *
 * \param fidx      The feature index.
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param dw        The change in weight.
 * \param in_gpair  The gradient vector to be updated.
 * \param p_fmat    The input feature matrix.
 */
inline void UpdateResidualParallel(int fidx, int group_idx, int num_group,
                                   float dw, std::vector<GradientPair> *in_gpair,
                                   DMatrix *p_fmat) {
  if (dw == 0.0f) return;
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto col = batch[fidx];
    // update grad value
    const auto num_row = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
    for (bst_omp_uint j = 0; j < num_row; ++j) {
      GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx];
      if (p.GetHess() < 0.0f) continue;
      // Only the gradient component changes; the Hessian is unaffected by a
      // weight shift (second derivative of a quadratic model is constant).
      p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0);
    }
  }
}

/**
 * \brief Updates the gradient vector based on a change in the bias.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param dbias     The change in bias.
 * \param in_gpair  The gradient vector to be updated.
 * \param p_fmat    The input feature matrix.
 */
inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias,
                                       std::vector<GradientPair> *in_gpair,
                                       DMatrix *p_fmat) {
  if (dbias == 0.0f) return;
  const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
  for (bst_omp_uint i = 0; i < ndata; ++i) {
    GradientPair &g = (*in_gpair)[i * num_group + group_idx];
    if (g.GetHess() < 0.0f) continue;
    g += GradientPair(g.GetHess() * dbias, 0);
  }
}

/**
 * \brief Abstract class for stateful feature selection or ordering
 *        in coordinate descent algorithms.
 */
class FeatureSelector {
 public:
  /*! \brief factory method */
  static FeatureSelector *Create(int choice);
  /*! \brief virtual destructor */
  virtual ~FeatureSelector() = default;
  /**
   * \brief Setting up the selector state prior to looping through features.
   *
   * \param model  The model.
   * \param gpair  The gpair.
   * \param p_fmat The feature matrix.
   * \param alpha  Regularisation alpha.
   * \param lambda Regularisation lambda.
   * \param param  A parameter with algorithm-dependent use.
   */
  virtual void Setup(const gbm::GBLinearModel &,
                     const std::vector<GradientPair> &,
                     DMatrix *,
                     float, float, int) {}
  /**
   * \brief Select next coordinate to update.
   *
   * \param iteration The iteration in a loop through features
   * \param model     The model.
   * \param group_idx Zero-based index of the group.
   * \param gpair     The gpair.
   * \param p_fmat    The feature matrix.
   * \param alpha     Regularisation alpha.
   * \param lambda    Regularisation lambda.
   *
   * \return The index of the selected feature. -1 indicates none selected.
   */
  virtual int NextFeature(int iteration,
                          const gbm::GBLinearModel &model,
                          int group_idx,
                          const std::vector<GradientPair> &gpair,
                          DMatrix *p_fmat, float alpha, float lambda) = 0;
};

/**
 * \brief Deterministic selection by cycling through features one at a time.
 */
class CyclicFeatureSelector : public FeatureSelector {
 public:
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int, const std::vector<GradientPair> &,
                  DMatrix *, float, float) override {
    return iteration % model.learner_model_param->num_feature;
  }
};

/**
 * \brief Similar to Cyclic but with random feature shuffling prior to each update.
 * \note Its randomness is controllable by setting a random seed.
 */
class ShuffleFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &g,
             DMatrix *, float, float, int) override {
    // Lazily build the identity permutation on first use, then reshuffle it
    // on every Setup() call.
    if (feat_index_.size() == 0) {
      feat_index_.resize(model.learner_model_param->num_feature);
      std::iota(feat_index_.begin(), feat_index_.end(), 0);
    }
    std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom());
  }

  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int, const std::vector<GradientPair> &,
                  DMatrix *, float, float) override {
    return feat_index_[iteration % model.learner_model_param->num_feature];
  }

 protected:
  // Shuffled feature ordering used by NextFeature().
  std::vector<bst_uint> feat_index_;
};

/**
 * \brief A random (with replacement) coordinate selector.
 * \note Its randomness is controllable by setting a random seed.
 */
class RandomFeatureSelector : public FeatureSelector {
 public:
  int NextFeature(int, const gbm::GBLinearModel &model,
                  int, const std::vector<GradientPair> &,
                  DMatrix *, float, float) override {
    return common::GlobalRandom()() % model.learner_model_param->num_feature;
  }
};

/**
 * \brief Select coordinate with the greatest gradient magnitude.
 * \note It has O(num_feature^2) complexity. It is fully deterministic.
 *
 * \note It allows restricting the selection to top_k features per group with
 * the largest magnitude of univariate weight change, by passing the top_k value
 * through the `param` argument of Setup(). That would reduce the complexity to
 * O(num_feature*top_k).
 */
class GreedyFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &,
             DMatrix *, float, float, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    const bst_uint ngroup = model.learner_model_param->num_output_group;
    // param <= 0 means "no top-k restriction".
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    if (counter_.size() == 0) {
      counter_.resize(ngroup);
      gpair_sums_.resize(model.learner_model_param->num_feature * ngroup);
    }
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      counter_[gid] = 0u;
    }
  }

  int NextFeature(int, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-K or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature)
      return -1;
    const int ngroup = model.learner_model_param->num_output_group;
    const bst_omp_uint nfeat = model.learner_model_param->num_feature;
    // Calculate univariate gradient sums
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
#pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = batch[i];
        const bst_uint ndata = col.size();
        auto &sums = gpair_sums_[group_idx * nfeat + i];
        for (bst_uint j = 0u; j < ndata; ++j) {
          const bst_float v = col[j].fvalue;
          auto &p = gpair[col[j].index * ngroup + group_idx];
          if (p.GetHess() < 0.f) continue;
          sums.first += p.GetGrad() * v;
          sums.second += p.GetHess() * v * v;
        }
      }
    }
    // Find a feature with the largest magnitude of weight change
    int best_fidx = 0;
    double best_weight_update = 0.0f;
    for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
      auto &s = gpair_sums_[group_idx * nfeat + fidx];
      float dw = std::abs(static_cast<bst_float>(
          CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
      if (dw > best_weight_update) {
        best_weight_update = dw;
        best_fidx = fidx;
      }
    }
    return best_fidx;
  }

 protected:
  // Maximum number of features to select per group.
  bst_uint top_k_;
  // Per-group count of features already selected.
  std::vector<bst_uint> counter_;
  // Per (group, feature) accumulated (gradient, hessian) sums.
  std::vector<std::pair<double, double>> gpair_sums_;
};

/**
 * \brief Thrifty, approximately-greedy feature selector.
 *
 * \note Prior to cyclic updates, reorders features in descending magnitude of
 * their univariate weight changes. This operation is multithreaded and is a
 * linear complexity approximation of the quadratic greedy selection.
 *
 * \note It allows restricting the selection to top_k features per group with
 * the largest magnitude of univariate weight change, by passing the top_k value
 * through the `param` argument of Setup().
 */
class ThriftyFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair,
             DMatrix *p_fmat, float alpha, float lambda, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    const bst_uint ngroup = model.learner_model_param->num_output_group;
    const bst_omp_uint nfeat = model.learner_model_param->num_feature;

    if (deltaw_.size() == 0) {
      deltaw_.resize(nfeat * ngroup);
      sorted_idx_.resize(nfeat * ngroup);
      counter_.resize(ngroup);
      gpair_sums_.resize(nfeat * ngroup);
    }
    // Calculate univariate gradient sums
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
      // column-parallel is usually faster than row-parallel
#pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = batch[i];
        const bst_uint ndata = col.size();
        for (bst_uint gid = 0u; gid < ngroup; ++gid) {
          auto &sums = gpair_sums_[gid * nfeat + i];
          for (bst_uint j = 0u; j < ndata; ++j) {
            const bst_float v = col[j].fvalue;
            auto &p = gpair[col[j].index * ngroup + gid];
            if (p.GetHess() < 0.f) continue;
            sums.first += p.GetGrad() * v;
            sums.second += p.GetHess() * v * v;
          }
        }
      }
    }
    // rank by descending weight magnitude within the groups
    std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
    std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
    bst_float *pdeltaw = &deltaw_[0];
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      // Calculate univariate weight changes
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        auto ii = gid * nfeat + i;
        auto &s = gpair_sums_[ii];
        deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
            s.first, s.second, model[i][gid], alpha, lambda));
      }
      // sort in descending order of deltaw abs values
      auto start = sorted_idx_.begin() + gid * nfeat;
      std::sort(start, start + nfeat,
                [pdeltaw](size_t i, size_t j) {
                  return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
                });
      counter_[gid] = 0u;
    }
  }

  int NextFeature(int, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &,
                  DMatrix *, float, float) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-N or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature)
      return -1;
    // note that sorted_idx stores the "long" indices
    const size_t grp_offset = group_idx * model.learner_model_param->num_feature;
    return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
  }

 protected:
  // Maximum number of features to select per group.
  bst_uint top_k_;
  // Univariate weight change per (group, feature) — used only for ranking.
  std::vector<bst_float> deltaw_;
  // Feature indices (flat, per-group-offset) sorted by |deltaw_| descending.
  std::vector<size_t> sorted_idx_;
  // Per-group count of features already selected.
  std::vector<bst_uint> counter_;
  // Per (group, feature) accumulated (gradient, hessian) sums.
  std::vector<std::pair<double, double>> gpair_sums_;
};

inline FeatureSelector *FeatureSelector::Create(int choice) {
  switch (choice) {
    case kCyclic:
      return new CyclicFeatureSelector();
    case kShuffle:
      return new ShuffleFeatureSelector();
    case kThrifty:
      return new ThriftyFeatureSelector();
    case kGreedy:
      return new GreedyFeatureSelector();
    case kRandom:
      return new RandomFeatureSelector();
    default:
      LOG(FATAL) << "unknown coordinate selector: " << choice;
  }
  return nullptr;
}

}  // namespace linear
}  // namespace xgboost
atomic-15.c
// { dg-do run } extern void abort (void); int x = 6; int main () { int v, l = 2, s = 1; #pragma omp atomic x = -3 + x; #pragma omp atomic read v = x; if (v != 3) abort (); #pragma omp atomic update x = 3 * 2 * 1 + x; #pragma omp atomic read v = x; if (v != 9) abort (); #pragma omp atomic capture v = x = x | 16; if (v != 25) abort (); #pragma omp atomic capture v = x = x + 14 * 2 / 4; if (v != 32) abort (); #pragma omp atomic capture v = x = 5 | x; if (v != 37) abort (); #pragma omp atomic capture v = x = 40 + 12 - 2 - 7 - x; if (v != 6) abort (); #pragma omp atomic read v = x; if (v != 6) abort (); #pragma omp atomic capture { v = x; x = 3 + x; } if (v != 6) abort (); #pragma omp atomic capture { v = x; x = -1 * -1 * -1 * -1 - x; } if (v != 9) abort (); #pragma omp atomic read v = x; if (v != -8) abort (); #pragma omp atomic capture { x = 2 * 2 - x; v = x; } if (v != 12) abort (); #pragma omp atomic capture { x = 7 & x; v = x; } if (v != 4) abort (); #pragma omp atomic capture { v = x; x = 6; } if (v != 4) abort (); #pragma omp atomic read v = x; if (v != 6) abort (); #pragma omp atomic capture { v = x; x = 7 * 8 + 23; } if (v != 6) abort (); #pragma omp atomic read v = x; if (v != 79) abort (); #pragma omp atomic capture { v = x; x = 23 + 6 * 4; } if (v != 79) abort (); #pragma omp atomic read v = x; if (v != 47) abort (); #pragma omp atomic capture { v = x; x = l ? 17 : 12; } if (v != 47) abort (); #pragma omp atomic capture { v = x; x = l = s++ + 3; } if (v != 17 || l != 4 || s != 2) abort (); #pragma omp atomic read v = x; if (v != 4) abort (); return 0; }
innerProd.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void FUNC(innerProd)( const dlong & Nblocks, const dlong & N, const dlong & offset, const dfloat * __restrict__ cpu_a, const dfloat * __restrict__ cpu_b, dfloat * __restrict__ cpu_ab){ dfloat ab = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for reduction(+:ab) #endif for(int i=0;i<N;++i){ const dfloat ai = cpu_a[i+offset]; const dfloat bi = cpu_b[i]; ab += ai*bi; } cpu_ab[0] = ab; }
trans.c
/*Daala video codec
Copyright (c) 2013 Daala project contributors.  All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

- Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

- Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include "od_defs.h"
#include "od_filter.h"
#include "trans_tools.h"
#include "int_search.h"
#include "kiss99.h"

/*Offline tool that measures/optimizes the 1-D coding gain of the lapped
  transform prefilter parameters.  The compile-time switches below pick the
  source of the autocorrelation data and the search strategy.*/

/*Data source: image files, an AR(0.95) model, or precomputed matrices.*/
#define USE_FILES   (0)
#define USE_AR95    (1)
#define USE_SUBSET1 (0)
#define USE_SUBSET3 (0)

#define PRINT_COV   (0)

/*Search: off (just print cg), exhaustive, or Nelder-Mead-style simplex.*/
#define CG_SEARCH   (0)
#define USE_SIMPLEX (1)
#define RAMP_DYADIC (0)

#if CG_SEARCH
# if USE_TYPE3&&RAMP_DYADIC
#  error "Dyadic ramp constraint not supported for Type-III transform."
# endif
# if USE_SIMPLEX&&RAMP_DYADIC
#  error "Dyadic ramp constraint not supported with simplex search."
# endif

/*Searches the filter-parameter space for the parameters maximizing the 1-D
  coding gain of the collapsed autocorrelation _r (length 2*B_SZ).
  Exhaustive search (!USE_SIMPLEX) is only implemented for B_SZ 4 and 8;
  the simplex path runs 128 randomly-seeded local searches in parallel and
  prints each local optimum to stdout.*/
static void coding_gain_search(const double _r[2*B_SZ]){
# if !USE_SIMPLEX
#  if B_SZ==4
  {
    int    f[4];
    int    p0;
    int    q0;
    int    s0;
    int    s1;
    double cg;
    double best_cg;
    best_cg=0;
#   if RAMP_DYADIC
    for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
      int t0;
      f[3]=q0;
      /* S0 = 4/1*(1-q0/64)
          S0 >= 1 -> 64-q0 >= 16 */
      t0=(1<<FILTER_BITS)-q0;
      s0=1*t0-0;
      if(s0>=(1<<FILTER_BITS-2)){
        s0*=4;
        f[0]=s0;
        for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
          f[2]=p0;
          /* S1 = 4/3*(1-(1-q0/64)*p0/64)
              S1 >= 1 -> 64^2-(64-q0)*p0 >= 64*48
              S1 = x/64 -> 64^2-(64-q0)*p0 = 0 MOD 48 */
          s1=(1<<2*FILTER_BITS)-t0*p0;
          if(s1>=(1<<FILTER_BITS)*(3<<FILTER_BITS-2)&&s1%(3<<FILTER_BITS-2)==0){
            s1/=(3<<FILTER_BITS-2);
            f[1]=s1;
            cg=coding_gain_1d_collapsed(_r,f);
            if(cg>best_cg){
              best_cg=cg;
              printf("%i %i %i %i %G\n",p0,q0,s0,s1,cg);
            }
          }
        }
      }
    }
#   else
    /*Unconstrained exhaustive scan over p0, q0, s0, s1.*/
    for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
      f[2]=p0;
      for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
        f[3]=q0;
        for(s0=(1<<FILTER_BITS);s0<=2*(1<<FILTER_BITS);s0++){
          f[0]=s0;
          for(s1=(1<<FILTER_BITS);s1<=2*(1<<FILTER_BITS);s1++){
            f[1]=s1;
            cg=coding_gain_1d_collapsed(_r,f);
            if(cg>best_cg){
              best_cg=cg;
              printf("%i %i %i %i %G\n",p0,q0,s0,s1,cg);
            }
          }
        }
      }
    }
#   endif
  }
#  elif B_SZ==8
  {
    int    f[10];
    int    p0;
    int    p1;
    int    p2;
    int    q0;
    int    q1;
    int    q2;
    int    s0;
    int    s1;
    int    s2;
    int    s3;
    double cg;
    double best_cg;
    best_cg=0;
#   if RAMP_DYADIC
    for(q0=(1<<FILTER_BITS);q0>=-(1<<FILTER_BITS);q0--){
      int t0;
      f[7]=q0;
      /* S0 = 8/1*(1-q0/64)
          S0 >= 1 -> 64-q0 >= 8 */
      t0=(1<<FILTER_BITS)-q0;
      s0=1*t0-0;
      if(s0>=(1<<FILTER_BITS-3)){
        s0*=8;
        f[0]=s0;
        for(p0=-(1<<FILTER_BITS);p0<=(1<<FILTER_BITS);p0++){
          f[4]=p0;
          for(q1=(1<<FILTER_BITS);q1>=-(1<<FILTER_BITS);q1--){
            int t1;
            f[8]=q1;
            /* S1 = 8/3*((1-q1/64)-(1-q0/64)*p0/64)
                S1 >= 1 -> 64*t1-t0*p0 >= 64*24
                S1 = x/64 -> 64*t1-t0*p0 = 0 MOD 24 */
            t1=(1<<FILTER_BITS)-q1;
            s1=(1<<FILTER_BITS)*t1-t0*p0;
            if(s1>=(1<<FILTER_BITS)*(3<<FILTER_BITS-3)&&
             s1%(3<<FILTER_BITS-3)==0){
              s1/=(3<<FILTER_BITS-3);
              f[1]=s1;
              for(p1=-(1<<FILTER_BITS);p1<=(1<<FILTER_BITS);p1++){
                f[5]=p1;
                for(q2=(1<<FILTER_BITS);q2>=-(1<<FILTER_BITS);q2--){
                  int t2;
                  f[9]=q2;
                  /* S2 = 8/5*((1-q2/64)-(1-q1/64)*p1/64)
                      S2 >= 1 -> 64*t2-t1*p1) >= 64*40
                      S2 = x/64 -> 64*t2-t1*p1 = 0 MOD 40 */
                  t2=(1<<FILTER_BITS)-q2;
                  s2=(1<<FILTER_BITS)*t2-t1*p1;
                  if(s2>=(1<<FILTER_BITS)*(5<<FILTER_BITS-3)&&
                   s2%(5<<FILTER_BITS-3)==0){
                    s2/=(5<<FILTER_BITS-3);
                    f[2]=s2;
                    for(p2=-(1<<FILTER_BITS);p2<=(1<<FILTER_BITS);p2++){
                      f[6]=p2;
                      /* S3 = 8/7*(1-(1-q2/64)*p2/64)
                          S3 >= 1 -> 64^2-t2*p2 >= 64*56
                          S3 = x/64 -> 64^2-t2*p2 = 0 MOD 56 */
                      s3=(1<<2*FILTER_BITS)-t2*p2;
                      if(s3>=(1<<FILTER_BITS)*(7<<FILTER_BITS-3)&&
                       s3%(7<<FILTER_BITS-3)==0){
                        s3/=(7<<FILTER_BITS-3);
                        f[3]=s3;
                        cg=coding_gain_1d_collapsed(_r,f);
                        if(cg>best_cg){
                          best_cg=cg;
                          printf("%i %i %i %i %i %i %i %i %i %i %-24.18G\n",
                           p0,p1,p2,q0,q1,q2,s0,s1,s2,s3,cg);
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
#   else
#    error "Exhaustive search for B_SZ==8 only supported using RAMP_DYADIC (1)."
#   endif
  }
#  else
#   error "Exhaustive search not supported for this block size."
#  endif
# else
  {
    int        dims;
    int        i;
    kiss99_ctx ks[NUM_PROCS];
    int        lb[22];
    int        ub[22];
#  if B_SZ==4
    dims=4;
#  elif B_SZ==8
    dims=10;
#  elif B_SZ==16
    dims=22;
#  else
#   error "Unsupported block size."
#  endif
    /*First B_SZ/2 parameters are scale factors in [64,128]; the rest are
      pre/post filter taps in [-64,64].*/
    for(i=0;i<dims;i++){
      lb[i]=i<(B_SZ>>1)?(1<<FILTER_BITS):-(1<<FILTER_BITS);
      ub[i]=i<(B_SZ>>1)?2*(1<<FILTER_BITS):(1<<FILTER_BITS);
    }
    /*One deterministic RNG stream per worker thread.*/
    for(i=0;i<NUM_PROCS;i++){
      uint32_t srand;
      srand=i*16843009; /*Broadcast char to 4xchar*/
      kiss99_srand(&ks[i],(unsigned char *)&srand,sizeof(srand));
    }
#pragma omp parallel for schedule(dynamic)
    for(i=0;i<128;i++){
      int tid;
      int j;
#  if B_SZ==4
      int f[4];
#  elif B_SZ==8
      int f[10];
#  elif B_SZ==16
      int f[22];
#  else
#   error "Unsupported block size."
#  endif
      double cg;
      tid=OD_OMP_GET_THREAD;
      /*Draw a uniform random starting point within the bounds via
        rejection sampling on a power-of-two mask.*/
      for(j=0;j<dims;j++){
        int range;
        int mask;
        int rng;
        range=ub[j]-lb[j];
        mask=(1<<OD_ILOG_NZ(range))-1;
        do {
          rng=((int)kiss99_rand(&ks[tid]))&mask;
        }
        while(rng>range);
        f[j]=lb[j]+rng;
      }
      j=int_simplex_max(&cg,dims,coding_gain_1d_collapsed,_r,lb,ub,f);
      fprintf(stdout,"obj=%-24.18G steps=%4d params={",cg,j);
      for(j=0;j<dims;j++){
        fprintf(stdout,"%3d%c",f[j],j==dims-1?'}':',');
      }
      fprintf(stdout,"\n");
    }
  }
# endif
}
#endif

#if USE_FILES
/*Per-image start callback: records the image geometry in the context.*/
static int t_start(void *_ctx,const char *_name,const th_info *_ti,int _pli,
 int _nxblocks,int _nyblocks){
  trans_ctx *ctx;
  fprintf(stdout,"%s %i %i\n",_name,_nxblocks,_nyblocks);
  fflush(stdout);
  ctx=(trans_ctx *)_ctx;
  image_ctx_init(&ctx->img,_name,_nxblocks,_nyblocks);
  return EXIT_SUCCESS;
}

/*Accumulates every horizontal and vertical 2*B_SZ-pixel window of the image
  into the autocorrelation data.  Only runs once per image (on block 0,0),
  scanning the whole frame itself.*/
static void t_load_data(void *_ctx,const unsigned char *_data,int _stride,
 int _bi,int _bj){
  trans_ctx *ctx;
  ctx=(trans_ctx *)_ctx;
  if(_bi==0&&_bj==0){
    int x;
    int y;
    int z;
    unsigned char buf[2*B_SZ];
    /* add the rows */
    for(y=0;y<ctx->img.nyblocks*B_SZ;y++){
      for(x=0;x<ctx->img.nxblocks*B_SZ-(2*B_SZ-1);x++){
        for(z=0;z<2*B_SZ;z++){
          buf[z]=_data[y*_stride+(x+z)];
        }
        trans_data_add(&ctx->td,buf);
      }
    }
    /* add the columns */
    for(y=0;y<ctx->img.nyblocks*B_SZ-(2*B_SZ-1);y++){
      for(x=0;x<ctx->img.nxblocks*B_SZ;x++){
        for(z=0;z<2*B_SZ;z++){
          buf[z]=_data[(y+z)*_stride+x];
        }
        trans_data_add(&ctx->td,buf);
      }
    }
  }
}

#define PADDING (0)

const block_func BLOCKS[]={
  t_load_data
};

const int NBLOCKS=sizeof(BLOCKS)/sizeof(*BLOCKS);
#endif

int main(int _argc,const char *_argv[]){
  trans_ctx     ctx[NUM_PROCS];
  const int    *f;
  int           i;
  double        r[2*B_SZ];
  const double *cov;
  (void)_argc;
  (void)_argv;
#if B_SZ==4
  f=OD_FILTER_PARAMS4;
#elif B_SZ==8
  f=OD_FILTER_PARAMS8;
#elif B_SZ==16
  f=OD_FILTER_PARAMS16;
#else
# error "Need filter params for this block size."
#endif
  for(i=0;i<NUM_PROCS;i++){
    trans_data_init(&ctx[i].td,2*B_SZ);
  }
  cov=r;
#if USE_FILES
  /*Gather statistics from the images passed on the command line, merging
    the per-thread accumulators into ctx[0].*/
  OD_OMP_SET_THREADS(NUM_PROCS);
  ne_apply_to_blocks(ctx,sizeof(*ctx),0x1,PADDING,t_start,NBLOCKS,BLOCKS,NULL,
   _argc,_argv);
  for(i=1;i<NUM_PROCS;i++){
    trans_data_combine(&ctx[0].td,&ctx[i].td);
  }
  trans_data_normalize(&ctx[0].td);
# if PRINT_COV
  trans_data_print(&ctx[0].td,stderr);
# endif
  fprintf(stdout,"original cg=%- 24.16G\n",coding_gain_1d(ctx[0].td.cov,f));
  trans_data_collapse(&ctx[0].td,1,r);
  fprintf(stdout,"collapse cg=%- 24.16G\n",coding_gain_1d_collapsed(r,f));
  trans_data_expand(&ctx[0].td,1,r);
  fprintf(stdout,"expanded cg=%- 24.16G\n",coding_gain_1d(ctx[0].td.cov,f));
#elif USE_AR95
  /*Synthetic AR model with correlation coefficient 0.95.*/
  auto_regressive_collapsed(r,2*B_SZ,1,0.95);
#elif USE_SUBSET1
# if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  cov=SUBSET1_1D[B_SZ_LOG-OD_LOG_BSIZE0];
# else
#  error "Need auto-correlation matrix for subset1 for this block size."
# endif
#elif USE_SUBSET3
# if B_SZ_LOG>=OD_LOG_BSIZE0&&B_SZ_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  cov=SUBSET3_1D[B_SZ_LOG-OD_LOG_BSIZE0];
# else
#  error "Need auto-correlation matrix for subset3 for this block size."
# endif
#endif
#if CG_SEARCH
  coding_gain_search(cov);
#else
  fprintf(stdout,"cg=%-24.18G\n",coding_gain_1d_collapsed(cov,f));
#endif
  for(i=0;i<NUM_PROCS;i++){
    trans_data_clear(&ctx[i].td);
  }
  return EXIT_SUCCESS;
}
eavlInfoTopologyMapOp.h
// Copyright 2010-2014 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_INFO_TOPOLOGY_MAP_OP_H
#define EAVL_INFO_TOPOLOGY_MAP_OP_H

#include "eavlCUDA.h"
#include "eavlCellSet.h"
#include "eavlCellSetExplicit.h"
#include "eavlCellSetAllStructured.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlTopology.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif

#ifndef DOXYGEN

// CPU worker: element-wise map over nitems entries, parallelized with OpenMP.
// For each index it looks up the cell's shape type from the connectivity and
// passes it to the functor along with the gathered input tuple.
template <class CONN>
struct eavlInfoTopologyMapOp_CPU
{
    static inline eavlArray::Location location() { return eavlArray::HOST; }
    template <class F, class IN, class OUT>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, F &functor)
    {
#pragma omp parallel for
        for (int index = 0; index < nitems; ++index)
        {
            int shapeType = conn.GetShapeType(index);
            collect(index, outputs) = functor(shapeType, collect(index, inputs));
        }
    }
};

#if defined __CUDACC__

// GPU kernel: grid-stride loop (index advances by blockDim.x*gridDim.x) so an
// arbitrary nitems is covered by a fixed launch configuration.
template <class CONN, class F, class IN, class OUT>
__global__ void
eavlInfoTopologyMapOp_kernel(int nitems, CONN conn,
                             const IN inputs, OUT outputs, F functor)
{
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (int index = threadID; index < nitems; index += numThreads)
    {
        int shapeType = conn.GetShapeType(index);
        collect(index, outputs) = functor(shapeType, collect(index, inputs));
    }
}

// GPU worker: launches the kernel with a fixed 32-block x 256-thread
// configuration and checks for launch errors.
template <class CONN>
struct eavlInfoTopologyMapOp_GPU
{
    static inline eavlArray::Location location() { return eavlArray::DEVICE; }
    template <class F, class IN, class OUT>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, F &functor)
    {
        int numThreads = 256;
        dim3 threads(numThreads, 1, 1);
        dim3 blocks (32, 1, 1);
        eavlInfoTopologyMapOp_kernel<<< blocks, threads >>>(nitems, conn,
                                                            inputs, outputs,
                                                            functor);
        CUDA_CHECK_ERROR();
    }
};

#endif

#endif

// ****************************************************************************
// Class:  eavlInfoTopologyMapOp
//
// Purpose:
///   Map from input to output arrays on the same topology type.
///
///   Much like a standard map, in that it does an element-wise map
///   between arrays of the same length, but the fields are known to
///   be on some sort of topology type -- typically a cell.  Or, you
///   could instead think of it like an eavlTopologyMap, but both the
///   inputs and outputs are on the same topology type.  (Or like an
///   eavlCombinedTopologyMap, but without a source topology.)
///
///   Essentially, this just adds a "shapetype" to the functor call of
///   a standard map operation.  For example, a cell-to-cell map would
///   be a simple map, but with the shape type (e.g. EAVL_HEX or
///   EAVL_TET) passed along with every functor call.
//
// Programmer:  Jeremy Meredith
// Creation:    August 1, 2013
//
// Modifications:
// ****************************************************************************
template <class I, class O, class F>
class eavlInfoTopologyMapOp : public eavlOperation
{
  protected:
    eavlCellSet *cells;      // cell set whose topology drives the map
    eavlTopology topology;   // which topological entity (e.g. cells) to map over
    I            inputs;     // input array tuple
    O            outputs;    // output array tuple (outputs.first sets the length)
    F            functor;    // user functor: functor(shapeType, inputs) -> outputs
  public:
    eavlInfoTopologyMapOp(eavlCellSet *c, eavlTopology t,
                          I i, O o, F f)
        : cells(c), topology(t), inputs(i), outputs(o), functor(f)
    {
    }
    // Dispatch on the concrete cell-set type (explicit vs. all-structured)
    // and run the CPU worker with the matching connectivity representation.
    // NOTE(review): if 'cells' is neither type, this silently does nothing --
    // confirm that is intended rather than raising an exception.
    virtual void GoCPU()
    {
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            eavlOpDispatch<eavlInfoTopologyMapOp_CPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, functor);
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlInfoTopologyMapOp_CPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, functor);
        }
    }
    // Same dispatch for the GPU path.  For explicit connectivity the three
    // backing arrays are staged to the device before the dispatch and back
    // to the host afterwards.  Throws if built without CUDA.
    virtual void GoGPU()
    {
#ifdef HAVE_CUDA
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);

            conn.shapetype.NeedOnDevice();
            conn.connectivity.NeedOnDevice();
            conn.mapCellToIndex.NeedOnDevice();

            eavlOpDispatch<eavlInfoTopologyMapOp_GPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, functor);

            conn.shapetype.NeedOnHost();
            conn.connectivity.NeedOnHost();
            conn.mapCellToIndex.NeedOnHost();
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlInfoTopologyMapOp_GPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, functor);
        }
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};

// helper function for type deduction; caller owns the returned pointer
template <class I, class O, class F>
eavlInfoTopologyMapOp<I,O,F> *new_eavlInfoTopologyMapOp(eavlCellSet *c, eavlTopology t,
                                                        I i, O o, F f)
{
    return new eavlInfoTopologyMapOp<I,O,F>(c,t,i,o,f);
}

#endif
GB_unop__tanh_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__tanh_fp32_fp32)
// op(A') function: GB (_unop_tran__tanh_fp32_fp32)

// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = tanhf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = tanhf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = tanhf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TANH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__tanh_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present,
        // so apply tanhf to each value unconditionally
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = tanhf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap flag is set are computed.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = tanhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is generated by textually including GB_unop_transpose.c,
// which expands the GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__tanh_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kmp_detach_tasks_t1.c
// RUN: %libomp-compile && env OMP_NUM_THREADS='3' %libomp-run // RUN: %libomp-compile && env OMP_NUM_THREADS='1' %libomp-run // REQUIRES: !abt #include <stdio.h> #include <omp.h> #include "omp_my_sleep.h" // detached untied #define PTASK_FLAG_DETACHABLE 0x40 // OpenMP RTL interfaces typedef unsigned long long kmp_uint64; typedef long long kmp_int64; typedef struct ID { int reserved_1; int flags; int reserved_2; int reserved_3; char *psource; } id; // Compiler-generated code (emulation) typedef struct ident { void* dummy; // not used in the library } ident_t; typedef enum kmp_event_type_t { KMP_EVENT_UNINITIALIZED = 0, KMP_EVENT_ALLOW_COMPLETION = 1 } kmp_event_type_t; typedef struct { kmp_event_type_t type; union { void *task; } ed; } kmp_event_t; typedef struct shar { // shareds used in the task } *pshareds; typedef struct task { pshareds shareds; int(*routine)(int,struct task*); int part_id; // void *destructor_thunk; // optional, needs flag setting if provided // int priority; // optional, needs flag setting if provided // ------------------------------ // privates used in the task: omp_event_handle_t evt; } *ptask, kmp_task_t; typedef int(*task_entry_t)(int, ptask); #ifdef __cplusplus extern "C" { #endif extern int __kmpc_global_thread_num(void *id_ref); extern ptask __kmpc_omp_task_alloc(id *loc, int gtid, int flags, size_t sz, size_t shar, task_entry_t rtn); extern int __kmpc_omp_task(id *loc, int gtid, ptask task); extern omp_event_handle_t __kmpc_task_allow_completion_event( ident_t *loc_ref, int gtid, ptask task); #if __cplusplus } #endif int volatile checker; // User's code, outlined into task entry int task_entry(int gtid, ptask task) { checker = 1; return 0; } int main() { int i, j, gtid = __kmpc_global_thread_num(NULL); int nt = omp_get_max_threads(); ptask task; pshareds psh; checker = 0; omp_set_dynamic(0); #pragma omp parallel //num_threads(N) { #pragma omp master { int gtid = __kmpc_global_thread_num(NULL); omp_event_handle_t evt; /* #pragma omp 
task detach(evt) {} */ task = (ptask)__kmpc_omp_task_alloc(NULL,gtid,PTASK_FLAG_DETACHABLE,sizeof(struct task),sizeof(struct shar),&task_entry); psh = task->shareds; evt = (omp_event_handle_t)__kmpc_task_allow_completion_event(NULL,gtid,task); task->evt = evt; __kmpc_omp_task(NULL, gtid, task); my_sleep(2.0); omp_fulfill_event(evt); } // end master } // end parallel // check results if (checker == 1) { printf("passed\n"); return 0; } else { printf("failed\n"); return 1; } }
DormandPrinceSolver.h
#pragma once
#include "../DifferentialSolver.h"

// Adaptive Dormand-Prince 5(4) embedded Runge-Kutta solver.  Each call to
// AdvancePhase() performs one of the 7 stages of the method; stage 7 also
// builds the 5th-order (nextCoords1) and embedded 4th-order (nextCoords2)
// solutions and derives the step-error estimate and predicted step size.
template<typename Scalar>
class DormandPrinceSolver : public DifferentialSolver<Scalar>
{
public:
  using DifferentialSolver<Scalar>::timeStep;
  using DifferentialSolver<Scalar>::currTime;
  using DifferentialSolver<Scalar>::tolerance;
  using DifferentialSolver<Scalar>::stepError;
  using DifferentialSolver<Scalar>::predictedStep;

  DormandPrinceSolver() {}

  // Bind the ODE system and allocate all per-stage work buffers.
  // initialCoords/oldCoords are only needed (and only allocated) when the
  // system has more than one hierarchy level.
  // NOTE(review): raw owning pointers with no copy ctor / assignment guard --
  // copying this solver would double-free (rule of three); confirm instances
  // are never copied.
  void SetSystem(DifferentialSystem<Scalar>* system) override
  {
    this->system = system;
    initialCoords = (system->GetHierarchyLevelsCount() > 1) ? new Scalar[system->GetMaxDimentionsCount()] : nullptr;
    oldCoords = (system->GetHierarchyLevelsCount() > 1) ? new Scalar[system->GetMaxDimentionsCount()] : nullptr;
    currCoords = new Scalar[system->GetMaxDimentionsCount()];
    nextCoords1 = new Scalar[system->GetMaxDimentionsCount()];
    nextCoords2 = new Scalar[system->GetMaxDimentionsCount()];
    probeCoords = new Scalar[system->GetMaxDimentionsCount()];
    k1 = new Scalar[system->GetMaxDimentionsCount()];
    k2 = new Scalar[system->GetMaxDimentionsCount()];
    k3 = new Scalar[system->GetMaxDimentionsCount()];
    k4 = new Scalar[system->GetMaxDimentionsCount()];
    k5 = new Scalar[system->GetMaxDimentionsCount()];
    k6 = new Scalar[system->GetMaxDimentionsCount()];
    k7 = new Scalar[system->GetMaxDimentionsCount()];
    currTime = Scalar(0.0);
  }

  // Frees exactly the buffers SetSystem() allocated (the hierarchy-only
  // buffers are released only when they were created).
  virtual ~DormandPrinceSolver()
  {
    if (system->GetHierarchyLevelsCount() > 1)
    {
      delete [] initialCoords;
      delete [] oldCoords;
    }
    delete [] currCoords;
    delete [] nextCoords1;
    delete [] nextCoords2;
    delete [] probeCoords;
    delete [] k1;
    delete [] k2;
    delete [] k3;
    delete [] k4;
    delete [] k5;
    delete [] k6;
    delete [] k7;
  }

  // Dormand-Prince has 7 stages (FSAL pairs share evaluations across steps).
  int GetPhasesCount() const override
  {
    return 7;
  }

  // Begin a new step; optionally snapshot the current coordinates (into
  // initialCoords when hierarchical, otherwise directly into currCoords).
  void InitStep(Scalar timeStep, Scalar tolerance, bool updateInitialCoords) override
  {
    DifferentialSolver<Scalar>::InitStep(timeStep, tolerance, updateInitialCoords);
    if (updateInitialCoords)
    {
      system->GetCurrCoords(currTime, system->GetHierarchyLevelsCount() > 1 ? initialCoords : currCoords);
    }
  }

  // Hierarchical variant: refresh curr/old coordinates for this solver state.
  // Intentionally a no-op for single-level systems.
  void InitStep(const SolverState& solverState) override
  {
    if (system->GetHierarchyLevelsCount() > 1)
    {
      system->GetCurrCoords(currTime, currCoords, oldCoords, solverState);
    }
  }

  // Run one RK stage.  Each case evaluates the derivative for the current
  // stage (k1..k7) and sets probeCoords per the Dormand-Prince Butcher
  // tableau for the next stage's evaluation point.
  bool AdvancePhase(const SolverState& solverState) override
  {
    // effective step for this hierarchy level (doubles per level)
    Scalar currStep = timeStep * (1 << solverState.hierarchyLevel);
    switch (solverState.phaseIndex)
    {
      case 0:
      {
        // stage 1: k1 = f(t, y); probe at t + h/5
        system->GetCurrDerivatives(k1, solverState);
        #pragma omp parallel for
        for(int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
        {
          probeCoords[coordIndex] =
            currCoords[coordIndex] +
            Scalar(0.20) * k1[coordIndex] * currStep;
        }
        system->SetCurrCoords(currTime + currStep * Scalar(0.20), probeCoords, solverState);
      } break;
      case 1:
      {
        // stage 2: probe at t + 3h/10 with weights (3/40, 9/40)
        system->GetCurrDerivatives(k2, solverState);
        #pragma omp parallel for
        for(int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
        {
          probeCoords[coordIndex] =
            currCoords[coordIndex] +
            Scalar(3.0 / 40.0) * k1[coordIndex] * currStep +
            Scalar(9.0 / 40.0) * k2[coordIndex] * currStep;
        }
        system->SetCurrCoords(currTime + currStep * Scalar(3.0 / 10.0), probeCoords, solverState);
      } break;
      case 2:
      {
        // stage 3: probe at t + 4h/5 with weights (44/45, -56/15, 32/9)
        system->GetCurrDerivatives(k3, solverState);
        #pragma omp parallel for
        for(int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
        {
          probeCoords[coordIndex] =
            currCoords[coordIndex] +
            Scalar(44.0 / 45.0) * k1[coordIndex] * currStep -
            Scalar(56.0 / 15.0) * k2[coordIndex] * currStep +
            Scalar(32.0 / 9.0) * k3[coordIndex] * currStep;
        }
        system->SetCurrCoords(currTime + currStep * Scalar(4.0 / 5.0), probeCoords, solverState);
      } break;
      case 3:
      {
        // stage 4: probe at t + 8h/9
        system->GetCurrDerivatives(k4, solverState);
        #pragma omp parallel for
        for(int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
        {
          probeCoords[coordIndex] =
            currCoords[coordIndex] +
            Scalar(19372.0 / 6561.0) * k1[coordIndex] * currStep -
            Scalar(25360.0 / 2187.0) * k2[coordIndex] * currStep +
            Scalar(64448.0 / 6561.0) * k3[coordIndex] * currStep -
            Scalar(212.0 / 729.0) * k4[coordIndex] * currStep;
        }
        system->SetCurrCoords(currTime + currStep * Scalar(8.0 / 9.0), probeCoords, solverState);
      } break;
      case 4:
      {
        // stage 5: probe at t + h
        system->GetCurrDerivatives(k5, solverState);
        #pragma omp parallel for
        for(int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
        {
          probeCoords[coordIndex] =
            currCoords[coordIndex] +
            Scalar(9017.0 / 3168.0) * k1[coordIndex] * currStep -
            Scalar(355.0 / 33.0) * k2[coordIndex] * currStep +
            Scalar(46732.0 / 5247.0) * k3[coordIndex] * currStep +
            Scalar(49.0 / 176.0) * k4[coordIndex] * currStep -
            Scalar(5103.0 / 18656.0) * k5[coordIndex] * currStep;
        }
        system->SetCurrCoords(currTime + currStep, probeCoords, solverState);
      } break;
      case 5:
      {
        // stage 6: probe at t + h with the 5th-order weights (k2 weight is 0)
        system->GetCurrDerivatives(k6, solverState);
        #pragma omp parallel for
        for(int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
        {
          probeCoords[coordIndex] =
            currCoords[coordIndex] +
            Scalar(35.0 / 384.0) * k1[coordIndex] * currStep +
            Scalar(500.0 / 1113.0) * k3[coordIndex] * currStep +
            Scalar(125.0 / 192.0) * k4[coordIndex] * currStep -
            Scalar(2187.0 / 6784.0) * k5[coordIndex] * currStep +
            Scalar(11.0 / 84.0) * k6[coordIndex] * currStep;
        }
        system->SetCurrCoords(currTime + currStep, probeCoords, solverState);
      } break;
      case 6:
      {
        // stage 7: k7 = f(t + h, y6).  Build the 5th-order solution
        // (nextCoords1) and the embedded 4th-order solution (nextCoords2),
        // then estimate the error and predict the next step size.
        system->GetCurrDerivatives(k7, solverState);
        #pragma omp parallel for
        for(int coordIndex = 0; coordIndex < system->GetDimentionsCount(solverState); coordIndex++)
        {
          nextCoords1[coordIndex] =
            currCoords[coordIndex] + currStep * (
            Scalar(35.0 / 384.0) * k1[coordIndex] +
            Scalar(500.0 / 1113.0) * k3[coordIndex] +
            Scalar(125.0 / 192.0) * k4[coordIndex] -
            Scalar(2187.0 / 6784.0) * k5[coordIndex] +
            Scalar(11.0 / 84.0) * k6[coordIndex]);
          nextCoords2[coordIndex] =
            currCoords[coordIndex] + currStep * (
            Scalar(5179.0 / 57600.0) * k1[coordIndex] +
            Scalar(7571.0 / 16695.0) * k3[coordIndex] +
            Scalar(393.0 / 640.0) * k4[coordIndex] -
            Scalar(92097.0 / 339200.0) * k5[coordIndex] +
            Scalar(187.0 / 2100.0) * k6[coordIndex] +
            Scalar(1.0 / 40.0) * k7[coordIndex]);
        }
        // normalized error per unit step; 1/5 exponent matches the 5th-order pair
        stepError = (system->GetErrorValue(currTime, nextCoords1, nextCoords2, solverState) / tolerance) / currStep;
        predictedStep = timeStep * Scalar(pow(Scalar(1.0) / stepError, Scalar(1.0 / 5.0)));
      } break;
    }
    return true;
  }

  // Commit the step.  NOTE(review): the embedded lower-order solution
  // (nextCoords2) is the one committed, not nextCoords1 -- confirm this is
  // intended (local extrapolation would commit the 5th-order result).
  void AdvanceStep(const SolverState& solverState) override
  {
    if (solverState.IsPreInitial())
    {
      currTime += timeStep; // advance time once per step, on the first substate
    }
    if (system->GetHierarchyLevelsCount() > 1)
    {
      system->SetCurrCoords(currTime, nextCoords2, oldCoords, solverState);
    }
    else
    {
      system->SetCurrCoords(currTime, nextCoords2);
    }
  }

  // Roll back to the snapshot taken in InitStep() (rejected step).
  void RevertStep(Scalar currTime) override
  {
    this->currTime = currTime;
    system->SetCurrCoords(currTime, system->GetHierarchyLevelsCount() > 1 ? initialCoords : currCoords);
  }

  Scalar GetLastStepError() const override
  {
    return stepError;
  }

  Scalar GetTimeStepPrediction() const override
  {
    return predictedStep;
  }

private:
  Scalar* initialCoords;   // step-start snapshot (hierarchical systems only)
  Scalar* currCoords;      // coordinates at the start of the current step
  Scalar* oldCoords;       // previous coordinates (hierarchical systems only)
  Scalar* nextCoords1;     // 5th-order solution candidate
  Scalar* nextCoords2;     // embedded 4th-order solution (the one committed)
  Scalar* probeCoords;     // intermediate stage evaluation point
  Scalar* k1;              // stage derivatives k1..k7
  Scalar* k2;
  Scalar* k3;
  Scalar* k4;
  Scalar* k5;
  Scalar* k6;
  Scalar* k7;
  DifferentialSystem<Scalar>* system; // not owned
};
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
task_early_fulfill.c
// RUN: %libomp-compile -fopenmp-version=50 && env OMP_NUM_THREADS='3' \
// RUN:   %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt

// Checked gcc 10.1 still does not support detach clause on task construct.
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9, gcc-10
// clang supports detach clause since version 11.
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// icc compiler does not support detach clause.
// UNSUPPORTED: icc

#include "callback.h"
#include <omp.h>

// With if(0) the detachable task is undeferred: it runs immediately and
// fulfills its own event before completing, so the OMPT runtime reports an
// ompt_task_early_fulfill task-schedule event (verified by the CHECK
// patterns below).
int main() {
#pragma omp parallel
#pragma omp master
  {
    omp_event_handle_t event;
#pragma omp task detach(event) if (0)
    { omp_fulfill_event(event); }
#pragma omp taskwait
  }
  return 0;
}

// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'

// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
// CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]],
// CHECK-SAME: parent_task_frame.exit=[[NULL]],
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: requested_team_size=3,

// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]

// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit=0x{{[0-f]+}},
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]],

// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_switch=7

// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=18446744073709551615,
// CHECK-SAME: prior_task_status=ompt_task_early_fulfill=5

// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_complete=1
sum.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>

enum { N = 10000000 };

/* Wall-clock time in seconds, gettimeofday-based. */
double wtime()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1E-6;
}

/* Serial divide-and-conquer sum of v[low..high], inclusive bounds. */
double sum(double *v, int low, int high)
{
    if (low == high)
        return v[low];
    int m = low + (high - low) / 2;
    return sum(v, low, m) + sum(v, m + 1, high);
}

/* Task-parallel recursive sum.  The available thread budget is split
 * between the two halves; once the budget drops to one thread the
 * computation falls back to the serial routine. */
double sum_omp_tasks_maxthreads(double *v, int low, int high, int nthreads)
{
    if (low == high)
        return v[low];
    if (nthreads <= 1)
        return sum(v, low, high);

    int m = low + (high - low) / 2;
    double left_part, right_part;
#pragma omp task shared(left_part)
    left_part = sum_omp_tasks_maxthreads(v, low, m, nthreads / 2);
#pragma omp task shared(right_part)
    right_part = sum_omp_tasks_maxthreads(v, m + 1, high, nthreads - nthreads / 2);
#pragma omp taskwait
    return left_part + right_part;
}

/* Entry point for the parallel sum: one thread seeds the task tree,
 * the rest of the team executes the spawned tasks. */
double sum_omp(double *v, int low, int high)
{
    double total = 0;
#pragma omp parallel
    {
#pragma omp single nowait
        total = sum_omp_tasks_maxthreads(v, low, high, omp_get_num_procs());
    }
    return total;
}

/* Fill v with 1..N, time the serial sum, report the result and its
 * deviation from the closed-form N(N+1)/2, and return the elapsed time. */
double run_serial()
{
    double *data = malloc(sizeof(*data) * N);
    for (int idx = 0; idx < N; idx++)
        data[idx] = idx + 1.0;

    double elapsed = wtime();
    double total = sum(data, 0, N - 1);
    elapsed = wtime() - elapsed;

    printf("Result (serial): %.4f; error %.12f\n", total,
           fabs(total - (1.0 + N) / 2.0 * N));
    free(data);
    return elapsed;
}

/* Same measurement for the task-parallel version. */
double run_parallel()
{
    double *data = malloc(sizeof(*data) * N);
    for (int idx = 0; idx < N; idx++)
        data[idx] = idx + 1.0;

    double elapsed = wtime();
    double total = sum_omp(data, 0, N - 1);
    elapsed = wtime() - elapsed;

    printf("Result (parallel): %.4f; error %.12f\n", total,
           fabs(total - (1.0 + N) / 2.0 * N));
    free(data);
    return elapsed;
}

int main(int argc, char **argv)
{
    printf("Recursive summation N = %d\n", N);
    double tserial = run_serial();
    double tparallel = run_parallel();
    printf("Execution time (serial): %.6f\n", tserial);
    printf("Execution time (parallel): %.6f\n", tparallel);
    printf("Speedup: %.2f\n", tserial / tparallel);
    return 0;
}
GB_transpose.c
//------------------------------------------------------------------------------ // GB_transpose: C=A' or C=op(A'), with typecasting //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // CALLS: GB_builder // Transpose a matrix, C=A', and optionally apply a unary operator and/or // typecast the values. The transpose may be done in-place, in which case C or // A are modified in-place. // There are two ways to use this method: // C = A' C and A are different // C = C' C is transposed in-place, (C==A aliased) // In both cases, the header for C and A must already be allocated (either // static or dynamic). A is never modified, unless C==A. C and A cannot be // NULL on input. If in place (C == A) then C and A is a valid matrix on input // (the input matrix A). If C != A, the contents of C are not defined on input, // and any prior content is freed. Either header may be static or dynamic. // The input matrix A may have shallow components (even if in-place), and the // output C may also have shallow components (even if the input matrix is not // shallow). // This function is CSR/CSC agnostic; it sets the output matrix format from // C_is_csc but otherwise ignores the CSR/CSC type of A and C. // The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is // m-by-n, then at most O(e/n) threads are used. The GB_builder method is more // scalable, but not as fast with a modest number of threads. 
#define GB_FREE_WORKSPACE \ { \ GB_FREE (&iwork, iwork_size) ; \ GB_FREE (&jwork, jwork_size) ; \ GB_FREE (&Swork, Swork_size) ; \ GB_WERK_POP (Count, int64_t) ; \ } #define GB_FREE_ALL \ { \ GB_FREE_WORKSPACE ; \ GB_Matrix_free (&T) ; \ /* freeing C also frees A if transpose is done in-place */ \ GB_phbix_free (C) ; \ } #include "GB_transpose.h" #include "GB_build.h" #include "GB_apply.h" //------------------------------------------------------------------------------ // GB_transpose //------------------------------------------------------------------------------ GrB_Info GB_transpose // C=A', C=(ctype)A' or C=op(A') ( GrB_Matrix C, // output matrix C, possibly modified in-place GrB_Type ctype, // desired type of C; if NULL use A->type. // ignored if op is present (cast to op->ztype) const bool C_is_csc, // desired CSR/CSC format of C const GrB_Matrix A, // input matrix; C == A if done in place // no operator is applied if op is NULL const GB_Operator op_in, // unary/idxunop/binop to apply const GrB_Scalar scalar, // scalar to bind to binary operator bool binop_bind1st, // if true, binop(x,A) else binop(A,y) bool flipij, // if true, flip i,j for user idxunop GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs and determine if transpose is done in-place //-------------------------------------------------------------------------- GrB_Info info ; ASSERT (C != NULL) ; ASSERT (A != NULL) ; bool in_place = (A == C) ; GB_WERK_DECLARE (Count, int64_t) ; int64_t *iwork = NULL ; size_t iwork_size = 0 ; int64_t *jwork = NULL ; size_t jwork_size = 0 ; GB_void *Swork = NULL ; size_t Swork_size = 0 ; struct GB_Matrix_opaque T_header ; GrB_Matrix T = NULL ; GB_CLEAR_STATIC_HEADER (T, &T_header) ; ASSERT_MATRIX_OK (A, "A input for GB_transpose", GB0) ; ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ; ASSERT_OP_OK_OR_NULL (op_in, "unop/binop for GB_transpose", GB0) ; ASSERT_SCALAR_OK_OR_NULL (scalar, 
"scalar for GB_transpose", GB0) ; if (in_place) { GBURBLE ("(in-place transpose) ") ; } // get the current sparsity control of A float A_hyper_switch = A->hyper_switch ; float A_bitmap_switch = A->bitmap_switch ; int A_sparsity_control = A->sparsity_control ; int64_t avlen = A->vlen ; int64_t avdim = A->vdim ; // wait if A has pending tuples or zombies; leave jumbled unless avdim == 1 if (GB_PENDING (A) || GB_ZOMBIES (A) || (avdim == 1 && GB_JUMBLED (A))) { GB_OK (GB_wait (A, "A", Context)) ; } ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (GB_IMPLIES (avdim == 1, !GB_JUMBLED (A))) ; //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- GrB_Type atype = A->type ; size_t asize = atype->size ; GB_Type_code acode = atype->code ; bool A_is_bitmap = GB_IS_BITMAP (A) ; bool A_is_hyper = GB_IS_HYPERSPARSE (A) ; int64_t anz = GB_nnz (A) ; int64_t anz_held = GB_nnz_held (A) ; int64_t anvec = A->nvec ; int64_t anvals = A->nvals ; //-------------------------------------------------------------------------- // determine the max number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // determine the type of C and get the unary, idxunop, binary operator //-------------------------------------------------------------------------- // If a unary, idxunop, or binary operator is present, C is always returned // as the ztype of the operator. The input ctype is ignored. 
GB_Operator op = NULL ; GB_Opcode opcode = GB_NOP_code ; if (op_in == NULL) { // no operator if (ctype == NULL) { // no typecasting if ctype is NULL ctype = atype ; } } else { opcode = op_in->opcode ; if (GB_IS_UNARYOP_CODE (opcode)) { // get the unary operator if (atype == op_in->xtype && opcode == GB_IDENTITY_unop_code) { // op is a built-in unary identity operator, with the same type // as A, so do not apply the operator and do not typecast. op // is NULL. ctype = atype ; } else { // apply the operator, z=unop(x) op = op_in ; ctype = op->ztype ; } } else // binary or idxunop { // get the binary or idxunop operator: only GB_apply calls // GB_transpose with op_in, and it ensures this condition holds: // first(A,y), second(x,A) have been renamed to identity(A), and // PAIR has been renamed one(A), so these cases do not occur here. ASSERT (!((opcode == GB_PAIR_binop_code) || (opcode == GB_FIRST_binop_code && !binop_bind1st) || (opcode == GB_SECOND_binop_code && binop_bind1st))) ; // apply the operator, z=binop(A,y), binop(x,A), or idxunop(A,y) op = op_in ; ctype = op->ztype ; } } bool user_idxunop = (opcode == GB_USER_idxunop_code) ; //-------------------------------------------------------------------------- // check for positional operators //-------------------------------------------------------------------------- bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ; GB_Operator save_op = op ; if (op_is_positional) { // do not apply the positional op until after the transpose; // replace op with the ONE operator, as a placeholder. C will be // constructed as iso, and needs to be expanded to non-iso when done. 
ASSERT (ctype == GrB_INT64 || ctype == GrB_INT32 || ctype == GrB_BOOL) ; op = (GB_Operator) GB_unop_one (ctype->code) ; } else if (user_idxunop) { // do not apply the user op until after the transpose; replace with // no operator at all, with no typecast op = NULL ; ctype = atype ; } //-------------------------------------------------------------------------- // determine the iso status of C //-------------------------------------------------------------------------- size_t csize = ctype->size ; ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ; GB_iso_code C_code_iso = GB_iso_unop_code (A, op, binop_bind1st) ; bool C_iso = (C_code_iso != GB_NON_ISO) ; ASSERT (GB_IMPLIES (A->iso, C_iso)) ; if (C_iso && !op_is_positional) { GBURBLE ("(iso transpose) ") ; } else { GBURBLE ("(transpose) ") ; } //========================================================================== // T = A', T = (ctype) A', or T = op (A') //========================================================================== if (anz == 0) { //---------------------------------------------------------------------- // A is empty //---------------------------------------------------------------------- // create a new empty matrix T, with the new type and dimensions. // set T->iso = false OK GB_OK (GB_new_bix (&T, // hyper, existing header ctype, avdim, avlen, GB_Ap_calloc, C_is_csc, GxB_HYPERSPARSE, true, A_hyper_switch, 1, 1, true, false, Context)) ; } else if (A_is_bitmap || GB_as_if_full (A)) { //---------------------------------------------------------------------- // transpose a bitmap/as-if-full matrix or vector //---------------------------------------------------------------------- // A is either bitmap or as-is-full (full, or sparse or hypersparse // with all entries present, no zombies, no pending tuples, and not // jumbled). T = A' is either bitmap or full. int T_sparsity = (A_is_bitmap) ? 
GxB_BITMAP : GxB_FULL ; bool T_cheap = // T can be done quickly if: (avlen == 1 || avdim == 1) // A is a row or column vector, && op == NULL // no operator to apply, && atype == ctype ; // and no typecasting // allocate T if (T_cheap) { // just initialize the static header of T, not T->b or T->x GBURBLE ("(cheap transpose) ") ; info = GB_new (&T, // bitmap or full, existing header ctype, avdim, avlen, GB_Ap_null, C_is_csc, T_sparsity, A_hyper_switch, 1, Context) ; ASSERT (info == GrB_SUCCESS) ; } else { // allocate all of T, including T->b and T->x // set T->iso = C_iso OK GB_OK (GB_new_bix (&T, // bitmap or full, existing header ctype, avdim, avlen, GB_Ap_null, C_is_csc, T_sparsity, true, A_hyper_switch, 1, anz_held, true, C_iso, Context)) ; } T->magic = GB_MAGIC ; if (T_sparsity == GxB_BITMAP) { T->nvals = anvals ; // for bitmap case only } //---------------------------------------------------------------------- // T = A' //---------------------------------------------------------------------- int nthreads = GB_nthreads (anz_held + anvec, chunk, nthreads_max) ; if (T_cheap) { // no work to do. 
Transposing does not change A->b or A->x T->b = A->b ; T->b_size = A->b_size ; T->x = A->x ; T->x_size = A->x_size ; if (in_place) { // transplant A->b and A->x into T T->b_shallow = A->b_shallow ; T->x_shallow = A->x_shallow ; A->b = NULL ; A->x = NULL ; } else { // T is a purely shallow copy of A T->b_shallow = (A->b != NULL) ; T->x_shallow = true ; } T->iso = A->iso ; // OK } else if (op == NULL) { // do not apply an operator; optional typecast to T->type GB_transpose_ix (T, A, NULL, NULL, 0, nthreads) ; } else { // apply an operator, T has type op->ztype GB_transpose_op (T, C_code_iso, op, scalar, binop_bind1st, A, NULL, NULL, 0, nthreads) ; } ASSERT_MATRIX_OK (T, "T dense/bitmap", GB0) ; ASSERT (!GB_JUMBLED (T)) ; } else if (avdim == 1) { //---------------------------------------------------------------------- // transpose a "column" vector into a "row" //---------------------------------------------------------------------- // transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen). // A must be sorted first. ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ; ASSERT (!GB_JUMBLED (A)) ; //---------------------------------------------------------------------- // allocate T //---------------------------------------------------------------------- // Initialized the header of T, with no content, and initialize the // type and dimension of T. T is hypersparse. 
info = GB_new (&T, // hyper; existing header ctype, 1, avlen, GB_Ap_null, C_is_csc, GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ; ASSERT (info == GrB_SUCCESS) ; // allocate T->p, T->i, and optionally T->x, but not T->h T->p = GB_MALLOC (anz+1, int64_t, &(T->p_size)) ; T->i = GB_MALLOC (anz , int64_t, &(T->i_size)) ; bool allocate_Tx = (op != NULL || C_iso) || (ctype != atype) ; if (allocate_Tx) { // allocate new space for the new typecasted numerical values of T T->x = GB_XALLOC (false, C_iso, anz, csize, &(T->x_size)) ; // x:OK } if (T->p == NULL || T->i == NULL || (allocate_Tx && T->x == NULL)) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // numerical values of T: apply the op, typecast, or make shallow copy //---------------------------------------------------------------------- // numerical values: apply the operator, typecast, or make shallow copy if (op != NULL || C_iso) { // T->x = unop (A), binop (A,scalar), or binop (scalar,A), or // compute the iso value of T = 1, A, or scalar, without any op info = GB_apply_op ((GB_void *) T->x, ctype, C_code_iso, op, scalar, binop_bind1st, flipij, A, Context) ; ASSERT (info == GrB_SUCCESS) ; } else if (ctype != atype) { // copy the values from A into T and cast from atype to ctype GB_cast_matrix (T, A, Context) ; } else { // no type change; numerical values of T are a shallow copy of A. 
ASSERT (!allocate_Tx) ; T->x = A->x ; T->x_size = A->x_size ; if (in_place) { // transplant A->x as T->x T->x_shallow = A->x_shallow ; A->x = NULL ; } else { // T->x is a shallow copy of A->x T->x_shallow = true ; } } // each entry in A becomes a non-empty vector in T; // T is a hypersparse 1-by-avlen matrix // transplant or shallow-copy A->i as the new T->h T->h = A->i ; T->h_size = A->i_size ; if (in_place) { // transplant A->i as T->h T->h_shallow = A->i_shallow ; A->i = NULL ; } else { // T->h is a shallow copy of A->i T->h_shallow = true ; } // T->p = 0:anz and T->i = zeros (1,anz), newly allocated T->plen = anz ; T->nvec = anz ; T->nvec_nonempty = anz ; // fill the vector pointers T->p int nthreads = GB_nthreads (anz, chunk, nthreads_max) ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < anz ; k++) { T->i [k] = 0 ; T->p [k] = k ; } T->p [anz] = anz ; T->iso = C_iso ; T->magic = GB_MAGIC ; } else if (avlen == 1) { //---------------------------------------------------------------------- // transpose a "row" into a "column" vector //---------------------------------------------------------------------- // transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1). // if A->vlen is 1, all vectors of A are implicitly sorted ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ; //---------------------------------------------------------------------- // allocate workspace, if needed //---------------------------------------------------------------------- int ntasks = 0 ; int nth = GB_nthreads (avdim, chunk, nthreads_max) ; if (nth > 1 && !A_is_hyper) { // ntasks and Count are not needed if nth == 1 ntasks = 8 * nth ; ntasks = GB_IMIN (ntasks, avdim) ; ntasks = GB_IMAX (ntasks, 1) ; GB_WERK_PUSH (Count, ntasks+1, int64_t) ; if (Count == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } } // Allocate the header of T, with no content // and initialize the type and dimension of T. 
info = GB_new (&T, // sparse; existing header ctype, avdim, 1, GB_Ap_null, C_is_csc, GxB_SPARSE, A_hyper_switch, 0, Context) ; ASSERT (info == GrB_SUCCESS) ; T->iso = C_iso ; // OK // allocate new space for the values and pattern T->p = GB_CALLOC (2, int64_t, &(T->p_size)) ; if (!A_is_hyper) { // A is sparse, so new space is needed for T->i T->i = GB_MALLOC (anz, int64_t, &(T->i_size)) ; } bool allocate_Tx = (op != NULL || C_iso) || (ctype != atype) ; if (allocate_Tx) { // allocate new space for the new typecasted numerical values of T T->x = GB_XALLOC (false, C_iso, anz, csize, &(T->x_size)) ; // x:OK } if (T->p == NULL || (T->i == NULL && !A_is_hyper) || (T->x == NULL && allocate_Tx)) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // numerical values of T: apply the op, typecast, or make shallow copy //---------------------------------------------------------------------- // numerical values: apply the operator, typecast, or make shallow copy if (op != NULL || C_iso) { // T->x = unop (A), binop (A,scalar), or binop (scalar,A), or // compute the iso value of T = 1, A, or scalar, without any op info = GB_apply_op ((GB_void *) T->x, ctype, C_code_iso, op, scalar, binop_bind1st, flipij, A, Context) ; ASSERT (info == GrB_SUCCESS) ; } else if (ctype != atype) { // copy the values from A into T and cast from atype to ctype GB_cast_matrix (T, A, Context) ; } else { // no type change; numerical values of T are a shallow copy of A. 
ASSERT (!allocate_Tx) ; T->x = A->x ; T->x_size = A->x_size ; if (in_place) { // transplant A->x as T->x T->x_shallow = A->x_shallow ; A->x = NULL ; } else { // T->x is a shallow copy of A->x T->x_shallow = true ; } } //---------------------------------------------------------------------- // compute T->i //---------------------------------------------------------------------- if (A_is_hyper) { //------------------------------------------------------------------ // each non-empty vector in A becomes an entry in T //------------------------------------------------------------------ T->i = A->h ; T->i_size = A->h_size ; if (in_place) { // transplant A->h as T->i T->i_shallow = A->h_shallow ; A->h = NULL ; } else { // T->i is a shallow copy of A->h T->i_shallow = true ; } } else { //------------------------------------------------------------------ // find the non-empty vectors of A, which become entries in T //------------------------------------------------------------------ if (nth == 1) { //-------------------------------------------------------------- // construct T->i with a single thread //-------------------------------------------------------------- int64_t k = 0 ; for (int64_t j = 0 ; j < avdim ; j++) { if (A->p [j] < A->p [j+1]) { T->i [k++] = j ; } } ASSERT (k == anz) ; } else { //-------------------------------------------------------------- // construct T->i in parallel //-------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nth) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend, k = 0 ; GB_PARTITION (jstart, jend, avdim, tid, ntasks) ; for (int64_t j = jstart ; j < jend ; j++) { if (A->p [j] < A->p [j+1]) { k++ ; } } Count [tid] = k ; } GB_cumsum (Count, ntasks, NULL, 1, NULL) ; ASSERT (Count [ntasks] == anz) ; #pragma omp parallel for num_threads(nth) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend, k = Count [tid] ; GB_PARTITION 
(jstart, jend, avdim, tid, ntasks) ; for (int64_t j = jstart ; j < jend ; j++) { if (A->p [j] < A->p [j+1]) { T->i [k++] = j ; } } } } #ifdef GB_DEBUG int64_t k = 0 ; for (int64_t j = 0 ; j < avdim ; j++) { if (A->p [j] < A->p [j+1]) { ASSERT (T->i [k] == j) ; k++ ; } } ASSERT (k == anz) ; #endif } //--------------------------------------------------------------------- // vector pointers of T //--------------------------------------------------------------------- // T->p = [0 anz] ASSERT (T->plen == 1) ; ASSERT (T->nvec == 1) ; T->nvec_nonempty = (anz == 0) ? 0 : 1 ; T->p [1] = anz ; T->magic = GB_MAGIC ; ASSERT (!GB_JUMBLED (T)) ; } else { //---------------------------------------------------------------------- // transpose a general sparse or hypersparse matrix //---------------------------------------------------------------------- ASSERT_MATRIX_OK (A, "A for GB_transpose", GB0) ; // T=A' with optional typecasting, or T=op(A') //---------------------------------------------------------------------- // select the method //---------------------------------------------------------------------- int nworkspaces_bucket, nthreads_bucket ; bool use_builder = GB_transpose_method (A, &nworkspaces_bucket, &nthreads_bucket, Context) ; //---------------------------------------------------------------------- // transpose the matrix with the selected method //---------------------------------------------------------------------- if (use_builder) { //------------------------------------------------------------------ // transpose via GB_builder //------------------------------------------------------------------ //------------------------------------------------------------------ // allocate and create iwork //------------------------------------------------------------------ // allocate iwork of size anz iwork = GB_MALLOC (anz, int64_t, &iwork_size) ; if (iwork == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } // Construct the "row" indices of C, which 
are "column" indices of // A. This array becomes the permanent T->i on output. GB_OK (GB_extract_vector_list (iwork, A, Context)) ; //------------------------------------------------------------------ // allocate the output matrix and additional space (jwork and Swork) //------------------------------------------------------------------ // initialize the header of T, with no content // content, and initialize the type and dimension of T. info = GB_new (&T, // hyper, existing header ctype, avdim, avlen, GB_Ap_null, C_is_csc, GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ; ASSERT (info == GrB_SUCCESS) ; // if in_place, the prior A->p and A->h can now be freed if (in_place) { if (!A->p_shallow) GB_FREE (&A->p, A->p_size) ; if (!A->h_shallow) GB_FREE (&A->h, A->h_size) ; } GB_void *S_input = NULL ; // for the GB_builder method, if the transpose is done in-place and // A->i is not shallow, A->i can be used and then freed. // Otherwise, A->i is not modified at all. bool ok = true ; bool recycle_Ai = (in_place && !A->i_shallow) ; if (!recycle_Ai) { // allocate jwork of size anz jwork = GB_MALLOC (anz, int64_t, &jwork_size) ; ok = ok && (jwork != NULL) ; } if (op != NULL && !C_iso) { Swork = (GB_void *) GB_XALLOC (false, C_iso, anz, // x:OK csize, &Swork_size) ; ok = ok && (Swork != NULL) ; } if (!ok) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //------------------------------------------------------------------ // construct jwork and Swork //------------------------------------------------------------------ // "row" indices of A become "column" indices of C if (recycle_Ai) { // A->i is used as workspace for the "column" indices of C. // jwork is A->i, and is freed by GB_builder. jwork = A->i ; jwork_size = A->i_size ; A->i = NULL ; ASSERT (in_place) ; } else { // copy A->i into jwork, making a deep copy. jwork is freed by // GB_builder. A->i is not modified, even if out of memory. 
GB_memcpy (jwork, A->i, anz * sizeof (int64_t), nthreads_max) ; } // numerical values: apply the op, typecast, or make shallow copy GrB_Type stype ; GB_void sscalar [GB_VLA(csize)] ; if (C_iso) { // apply the op to the iso scalar GB_iso_unop (sscalar, ctype, C_code_iso, op, A, scalar) ; S_input = sscalar ; // S_input is used instead of Swork Swork = NULL ; stype = ctype ; } else if (op != NULL) { // Swork = op (A) info = GB_apply_op (Swork, ctype, C_code_iso, op, scalar, binop_bind1st, flipij, A, Context) ; ASSERT (info == GrB_SUCCESS) ; // GB_builder will not need to typecast Swork to T->x, and it // may choose to transplant it into T->x S_input = NULL ; // Swork is used instead of S_input stype = ctype ; } else { // GB_builder will typecast S_input from atype to ctype if // needed. S_input is a shallow copy of Ax, and must not be // modified. ASSERT (!C_iso) ; ASSERT (!A->iso) ; S_input = (GB_void *) A->x ; // S_input is used instead of Swork Swork = NULL ; stype = atype ; } //------------------------------------------------------------------ // build the matrix: T = (ctype) A' or op ((xtype) A') //------------------------------------------------------------------ // internally, jwork is freed and then T->x is allocated, so the // total memory usage is anz * max (csize, sizeof(int64_t)). T is // always hypersparse. Either T, Swork, and S_input are all iso, // or all non-iso, depending on C_iso. 
GB_OK (GB_builder ( T, // create T using a static header ctype, // T is of type ctype avdim, // T->vlen = A->vdim, always > 1 avlen, // T->vdim = A->vlen, always > 1 C_is_csc, // T has the same CSR/CSC format as C &iwork, // iwork_handle, becomes T->i on output &iwork_size, &jwork, // jwork_handle, freed on output &jwork_size, &Swork, // Swork_handle, freed on output &Swork_size, false, // tuples are not sorted on input true, // tuples have no duplicates anz, // size of iwork, jwork, and Swork true, // is_matrix: unused NULL, NULL, // original I,J indices: not used here S_input, // array of values of type stype, not modified C_iso, // iso property of T is the same as C->iso anz, // number of tuples NULL, // no dup operator needed (input has no duplicates) stype, // type of S_input or Swork Context )) ; // GB_builder always frees jwork, and either frees iwork or // transplants it in to T->i and sets iwork to NULL. So iwork and // jwork are always NULL on output. GB_builder does not modify // S_input. 
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ; ASSERT (!GB_JUMBLED (T)) ; } else { //------------------------------------------------------------------ // transpose via bucket sort //------------------------------------------------------------------ // T = A' and typecast to ctype GB_OK (GB_transpose_bucket (T, C_code_iso, ctype, C_is_csc, A, op, scalar, binop_bind1st, nworkspaces_bucket, nthreads_bucket, Context)) ; ASSERT_MATRIX_OK (T, "T from bucket", GB0) ; ASSERT (GB_JUMBLED_OK (T)) ; } } //========================================================================== // free workspace, apply positional op, and transplant/conform T into C //========================================================================== //-------------------------------------------------------------------------- // free workspace //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; if (in_place) { // free prior space of A, if transpose is done in-place GB_phbix_free (A) ; } //-------------------------------------------------------------------------- // transplant T into the result C //-------------------------------------------------------------------------- // transplant the control settings from A to C C->hyper_switch = A_hyper_switch ; C->bitmap_switch = A_bitmap_switch ; C->sparsity_control = A_sparsity_control ; GB_OK (GB_transplant (C, ctype, &T, Context)) ; ASSERT_MATRIX_OK (C, "C transplanted in GB_transpose", GB0) ; ASSERT_TYPE_OK (ctype, "C type in GB_transpose", GB0) ; //-------------------------------------------------------------------------- // apply a positional operator or user idxunop after transposing the matrix //-------------------------------------------------------------------------- op = save_op ; if (op_is_positional) { if (C->iso) { // If C was constructed as iso; it needs to be expanded first, // but do not initialize the values. These are computed by // GB_apply_op below. 
// set C->iso = false OK: no need to burble GB_OK (GB_convert_any_to_non_iso (C, false, Context)) ; } // the positional unary op is applied in-place: C->x = op (C) GB_OK (GB_apply_op ((GB_void *) C->x, ctype, GB_NON_ISO, op, scalar, binop_bind1st, flipij, C, Context)) ; } else if (user_idxunop) { if (C->iso) { // If C was constructed as iso; it needs to be expanded and // initialized first. GB_OK (GB_convert_any_to_non_iso (C, true, Context)) ; } if (C->type == op->ztype) { // the user-defined index unary op is applied in-place: C->x = op // (C) where the type of C does not change GB_OK (GB_apply_op ((GB_void *) C->x, ctype, GB_NON_ISO, op, scalar, binop_bind1st, flipij, C, Context)) ; } else // op is a user-defined index unary operator { // apply the operator to the transposed matrix: // C = op (C), but not in-place since the type of C is changing ctype = op->ztype ; csize = ctype->size ; size_t Cx_size = 0 ; GB_void *Cx_new = NULL ; if (GB_IS_BITMAP (C)) { // calloc the space so the new C->x has no uninitialized space Cx_new = GB_CALLOC (anz_held*csize, GB_void, &Cx_size) ; // x:OK } else { // malloc is fine; all C->x will be written Cx_new = GB_MALLOC (anz_held*csize, GB_void, &Cx_size) ; // x:OK } if (Cx_new == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } // Cx_new = op (C) GB_OK (GB_apply_op (Cx_new, ctype, GB_NON_ISO, op, scalar, false, flipij, C, Context)) ; // transplant Cx_new as C->x and finalize the type of C GB_FREE (&(C->x), C->x_size) ; C->x = Cx_new ; C->x_size = Cx_size ; C->type = ctype ; C->iso = false ; } } //-------------------------------------------------------------------------- // conform the result to the desired sparsity structure of A //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ; GB_OK (GB_conform (C, Context)) ; ASSERT_MATRIX_OK (C, "C output of GB_transpose", GB0) ; return (GrB_SUCCESS) ; }
q3.c
#include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "common.h" const i32 REPEAT_COUNT = 512; typedef struct { i32 x; i32 y; i32 temp; } heat_source; typedef struct { i32 dim_x; i32 dim_y; i32 heat_count; heat_source* heat_sources; } heat_data; heat_data* get_data_from_file() { FILE* file = fopen("input2.txt", "r"); heat_data* data = malloc(sizeof(heat_data)); i32 dim_x, dim_y; fscanf(file, "Dimensions:%d X %d\n", &dim_x, &dim_y); data->dim_x = dim_x; data->dim_y = dim_y; i32 heat_count; fscanf(file, "Number of Heat Source: %d\n", &heat_count); data->heat_count = heat_count; fscanf(file, "%*[^\n]\n"); data->heat_sources = malloc(sizeof(heat_source) * heat_count); for (i32 i = 0; i < heat_count; i++) { heat_source source; fscanf(file, "(%d, %d, %d)\n", &source.x, &source.y, &source.temp); data->heat_sources[i] = source; } fclose(file); return data; } void write_output_to_file(i32** output, heat_data* data) { FILE* file = fopen("output2.txt", "w"); for (i32 y = 0; y < data->dim_y; y++) { for (i32 x = 0; x < data->dim_x; x++) { if (x == data->dim_x - 1) { fprintf(file, "%d", output[y][x]); } else { fprintf(file, "%d ", output[y][x]); } } fprintf(file, "\n"); } fclose(file); } f32 get_heat_multiplier(i32 x1, i32 y1, i32 x2, i32 y2) { i32 dist_squared = (abs(x1 - x2) * abs(x1 - x2)) + (abs(y1 - y2) * abs(y1 - y2)); if (dist_squared == 0) { return 1; } else if (dist_squared > 0 && dist_squared <= 5) { return 0.85; } else if (dist_squared > 5 && dist_squared <= 25) { return 0.75; } else if (dist_squared > 25 && dist_squared <= 49) { return 0.65; } else if (dist_squared > 49 && dist_squared <= 61) { return 0.60; } else { return 0; } } f64 do_it(i32** output, heat_data* data) { f64 before = omp_get_wtime(); #pragma omp parallel for collapse(2) for (i32 y = 0; y < data->dim_y; y++) { for (i32 x = 0; x < data->dim_x; x++) { i32 max_heat = -1; for (i32 i = 0; i < data->heat_count; i++) { heat_source* source = &(data->heat_sources[i]); i32 heat = 
(i32)(((f32)source->temp) * get_heat_multiplier(x, y, source->x, source->y)); if (heat > max_heat) { max_heat = heat; } } output[y][x] = max_heat; } } f64 after = omp_get_wtime(); return after - before; } i32 main(i32 argc, char** argv) { if (argc > 1) { i32 thread_count = atoi(argv[1]); omp_set_num_threads(thread_count); } heat_data* data = get_data_from_file(); i32** output = malloc(sizeof(i32*) * data->dim_y); for (i32 i = 0; i < data->dim_y; i++) { output[i] = malloc(sizeof(i32) * data->dim_x); } f64 time_sum = 0; for (i32 i = 0; i < REPEAT_COUNT; i++) { time_sum += do_it(output, data); } printf("%fmicroseconds\n", (time_sum * 1000000) / REPEAT_COUNT); write_output_to_file(output, data); for (i32 i = 0; i < data->dim_y; i++) { free(output[i]); } free(output); free(data->heat_sources); free(data); }
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2012-2020 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/
//=================================================================================================

#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_


//*************************************************************************************************
// Includes
//*************************************************************************************************

#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseVector.h>
#include <blaze/math/expressions/SparseVector.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/typetraits/IsDenseVector.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Subvector.h>
#include <blaze/system/MacroDisable.h>
#include <blaze/system/SMP.h>
#include <blaze/util/algorithms/Min.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>


namespace blaze {

//=================================================================================================
//
// OPENMP-BASED ASSIGNMENT KERNELS
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side dense vector
        , bool TF2       // Transpose flag of the right-hand side dense vector
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   // This kernel runs on every thread of an already-open OpenMP parallel region;
   // the caller is responsible for opening it (see the smp*Assign() wrappers below).
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   using ET1 = ElementType_t<VT1>;
   using ET2 = ElementType_t<VT2>;

   // SIMD processing is only possible when both vector types are vectorizable and
   // their element types can be combined into a common SIMD type.
   constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> );
   constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<VT1> >::size );

   const bool lhsAligned( (*lhs).isAligned() );
   const bool rhsAligned( (*rhs).isAligned() );

   const int    threads      ( omp_get_num_threads() );
   // 'addon' rounds the per-thread share up so that threads*equalShare covers the whole vector.
   const size_t addon        ( ( ( (*lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (*lhs).size() / threads + addon );
   // Remainder modulo SIMDSIZE (SIMDSIZE is a power of two, so the bitmask is equivalent to %).
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   // In SIMD mode the chunk size is rounded up to the next multiple of SIMDSIZE so that
   // every chunk boundary coincides with a SIMD boundary.
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Due to the round-up of sizePerThread, trailing chunks may start past the end.
      if( index >= (*lhs).size() ) continue;

      const size_t size( min( sizePerThread, (*lhs).size() - index ) );

      // Select aligned subvector views whenever the runtime alignment of the operands allows it.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         auto       target( subvector<aligned>( *lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( *rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && lhsAligned ) {
         auto       target( subvector<aligned>( *lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( *rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && rhsAligned ) {
         auto       target( subvector<unaligned>( *lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( *rhs, index, size, unchecked ) );
         op( target, source );
      }
      else {
         auto       target( subvector<unaligned>( *lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( *rhs, index, size, unchecked ) );
         op( target, source );
      }
   }
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// vector to a dense vector.\n
// This function must \b NOT be called explicitly!
It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side sparse vector
        , bool TF2       // Transpose flag of the right-hand side sparse vector
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   // Sparse right-hand sides cannot be SIMD-processed, therefore the chunks are not
   // padded to SIMD boundaries and only unaligned views are used.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (*lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t sizePerThread( (*lhs).size() / threads + addon );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Threads whose chunk starts past the end of the vector have no work to do.
      if( index >= (*lhs).size() ) continue;

      const size_t size( min( sizePerThread, (*lhs).size() - index ) );
      auto       target( subvector<unaligned>( *lhs, index, size, unchecked ) );
      const auto source( subvector<unaligned>( *rhs, index, size, unchecked ) );
      op( target, source );
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   // Fallback: at least one operand is not SMP-assignable, so assign serially.
   assign( *lhs, *rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \return void
//
// This function performs the OpenMP-based SMP assignment to a dense vector. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> );

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Run serially when nested inside a serial section or when the expression
      // itself vetoes parallel execution (e.g. because it is too small).
      if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) {
         assign( *lhs, *rhs );
      }
      else {
         // The parallel region is opened here; the worksharing loop lives in openmpAssign().
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ assign( a, b ); } );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   // Fallback: at least one operand is not SMP-assignable, so add serially.
   addAssign( *lhs, *rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> );

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Run serially when nested inside a serial section or when the expression
      // itself vetoes parallel execution.
      if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) {
         addAssign( *lhs, *rhs );
      }
      else {
         // The parallel region is opened here; the worksharing loop lives in openmpAssign().
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ addAssign( a, b ); } );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment of a vector to
// a dense vector. Due to the explicit application of the SFINAE principle, this function can
// only be selected by the compiler in case both operands are SMP-assignable and the element
// types of both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   // Fallback: at least one operand is not SMP-assignable, so subtract serially.
   subAssign( *lhs, *rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment to a dense vector. Due
// to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> );

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Run serially when nested inside a serial section or when the expression
      // itself vetoes parallel execution.
      if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) {
         subAssign( *lhs, *rhs );
      }
      else {
         // The parallel region is opened here; the worksharing loop lives in openmpAssign().
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ subAssign( a, b ); } );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*!
\cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// vector. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   // Fallback: at least one operand is not SMP-assignable, so multiply serially.
   multAssign( *lhs, *rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be multiplied.
// \return void
//
// This function implements the OpenMP-based SMP multiplication assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> );

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Run serially when nested inside a serial section or when the expression
      // itself vetoes parallel execution.
      if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) {
         multAssign( *lhs, *rhs );
      }
      else {
         // The parallel region is opened here; the worksharing loop lives in openmpAssign().
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ multAssign( a, b ); } );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
// DIVISION ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*!
\cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector divisor.
// \return void
//
// This function implements the default OpenMP-based SMP division assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   // Fallback: at least one operand is not SMP-assignable, so divide serially.
   divAssign( *lhs, *rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector divisor.
// \return void
//
// This function implements the OpenMP-based SMP division assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side vector
        , bool TF2 >     // Transpose flag of the right-hand side vector
inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
   -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> >
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> );

   BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Run serially when nested inside a serial section or when the expression
      // itself vetoes parallel execution.
      if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) {
         divAssign( *lhs, *rhs );
      }
      else {
         // The parallel region is opened here; the worksharing loop lives in openmpAssign().
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ divAssign( a, b ); } );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
// COMPILE TIME CONSTRAINTS
//
//=================================================================================================

//*************************************************************************************************
/*!
\cond BLAZE_INTERNAL */
namespace {

// Compile-time guard: this header is only valid when Blaze is configured for the
// OpenMP parallelization mode.
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );

}
/*! \endcond */
//*************************************************************************************************

} // namespace blaze

#endif
/* ==== concatenated file boundary: uts_omp_task_shmem.c ==== */
/*
 *         ---- The Unbalanced Tree Search (UTS) Benchmark ----
 *
 *  Copyright (c) 2010 See AUTHORS file for copyright holders
 *
 *  This file is part of the unbalanced tree search benchmark.  This
 *  project is licensed under the MIT Open Source license.  See the LICENSE
 *  file for copyright and licensing information.
 *
 *  UTS is a collaborative project between researchers at the University of
 *  Maryland, the University of North Carolina at Chapel Hill, and the Ohio
 *  State University.  See AUTHORS file for more information.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <shmem.h>

#include "uts.h"

/***********************************************************
 *                                                         *
 *  Compiler Type (these flags are set by at compile time) *
 *     (default) ANSI C compiler - sequential execution    *
 *     (_OPENMP) OpenMP enabled C compiler                 *
 *     (__UPC__) UPC compiler                              *
 *     (_SHMEM)  Cray Shmem                                *
 *     (__PTHREADS__) Pthreads multithreaded execution     *
 *                                                         *
 ***********************************************************/

#if defined(_OPENMP)
/**** OpenMP Definitions ****/
#include <omp.h>
#define PARALLEL         1
#define COMPILER_TYPE    1
#define SHARED
#define SHARED_INDEF
#define VOLATILE         volatile
#define MAX_OMP_THREADS       32
#define MAX_SHMEM_THREADS   1024
#define LOCK_T           omp_lock_t
#define GET_NUM_THREADS  omp_get_num_threads()
#define GET_THREAD_NUM   omp_get_thread_num()
#define SET_LOCK(zlk)    omp_set_lock(zlk)
#define UNSET_LOCK(zlk)  omp_unset_lock(zlk)
#define INIT_LOCK(zlk)   zlk=omp_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk=omp_global_lock_alloc()
#define SMEMCPY          memcpy
#define ALLOC            malloc
#define BARRIER
// OpenMP helper function to match UPC lock allocation semantics
// (UPC hands out pointers to freshly allocated locks; this does the same for OpenMP).
// NOTE(review): the +128 padding presumably avoids false sharing between locks — confirm;
// the malloc result is not checked before omp_init_lock.
omp_lock_t * omp_global_lock_alloc() {
    omp_lock_t *lock = (omp_lock_t *) malloc(sizeof(omp_lock_t) + 128);
    omp_init_lock(lock);
    return lock;
}
#else
#error Only supports OMP
#endif /* END Par. Model Definitions */

/***********************************************************
 *  Parallel execution parameters                          *
 ***********************************************************/
int doSteal   = PARALLEL; // 1 => use work stealing
int chunkSize = 20;       // number of nodes to move to/from shared area
int cbint     = 1;        // Cancellable barrier polling interval
int pollint   = 1;        // BUPC Polling interval

int n_nodes = 0;
int n_leaves = 0;

#ifdef THREAD_METADATA
// Per-thread task counters, only compiled in when THREAD_METADATA is defined.
typedef struct _thread_metadata {
    size_t ntasks;
} thread_metadata;
thread_metadata t_metadata[MAX_OMP_THREADS];
#endif

// Per-OpenMP-thread node/leaf counters, later reduced into n_nodes/n_leaves.
typedef struct _per_thread_info {
    int n_nodes;
    int n_leaves;
} per_thread_info;
per_thread_info thread_info[MAX_OMP_THREADS];

#define N_BUFFERED_STEALS 16
// Symmetric (per-PE) buffer of stealable nodes, guarded by steal_buffer_locks[pe].
Node steal_buffer[N_BUFFERED_STEALS];
volatile int n_buffered_steals = 0;
long steal_buffer_locks[MAX_SHMEM_THREADS];

// Count of PEs that have run out of local and scanned work (termination detection).
int complete_pes = 0;

static int pe, npes;

/*
 * Try to pop one buffered Node from target_pe's steal buffer.
 * Takes the target's buffer lock remotely, reads its counter, and on success
 * copies the topmost Node into stolen_out and writes back the decremented counter.
 * Returns 1 if a node was stolen, 0 if the target's buffer was empty.
 */
static int steal_from(int target_pe, Node *stolen_out) {
    int remote_buffered_steals;

    shmem_set_lock(&steal_buffer_locks[target_pe]);

    shmem_int_get(&remote_buffered_steals, (int *)&n_buffered_steals, 1, target_pe);

    int stole_something = 0;
    if (remote_buffered_steals > 0) {
        remote_buffered_steals--;
        shmem_getmem(stolen_out, &steal_buffer[remote_buffered_steals],
                sizeof(Node), target_pe);

        shmem_int_put((int *)&n_buffered_steals, &remote_buffered_steals, 1,
                target_pe);
        stole_something = 1;
    }

    shmem_clear_lock(&steal_buffer_locks[target_pe]);

    return stole_something;
}

/*
 * Attempt to obtain work from any other PE.
 * First scans every PE once; if nothing is found, this PE registers itself as
 * "complete" on PE 0 (the shared counter complete_pes) and keeps probing two
 * rotating neighbors (pe_above / pe_below) until either a steal succeeds (the
 * PE then deregisters itself) or all PEs have registered, signalling global
 * termination. Returns 1 with *stolen_out filled on success, 0 on termination.
 */
static int remote_steal(Node *stolen_out) {
    int pe_above = (pe + 1) % npes;
    int pe_below = pe - 1;
    if (pe_below < 0) pe_below = npes - 1;

    int target_pe;
    // First scan through all PEs looking for work
    for (target_pe = 0; target_pe < npes; target_pe++) {
        if (target_pe == pe) continue;
        if (steal_from(target_pe, stolen_out)) {
            return 1;
        }
    }

    // Only re-read the shared completion counter every few attempts to limit
    // atomic traffic to PE 0.
    const int finish_check_periodicity = 3;

    // Atomically register this PE as complete; ndone includes ourselves.
    int ndone = shmem_int_finc(&complete_pes, 0) + 1;
    int attempts = 0;
    while (ndone != npes) {
        // Try to remote steal
        if (steal_from(pe_above, stolen_out) ||
                steal_from(pe_below, stolen_out)) {
            // Work found again: deregister from the completion count.
            shmem_int_add(&complete_pes, -1, 0);
            return 1;
        }

        pe_above = (pe_above + 1) % npes;
        pe_below = pe_below - 1;
        if (pe_below < 0) pe_below = npes - 1;

        attempts++;
        if (attempts % finish_check_periodicity == 0) {
            ndone = shmem_int_fadd(&complete_pes, 0, 0);
        }
    }
    assert(ndone == npes);
    return 0;
}

#ifdef __BERKELEY_UPC__
/* BUPC nonblocking I/O Handles */
bupc_handle_t cb_handle       = BUPC_COMPLETE_HANDLE;
const int     local_cb_cancel = 1;
#endif


/***********************************************************
 * Tree statistics (if selected via UTS_STAT)              *
 *   compute overall size and imbalance metrics            *
 *   and histogram size and imbalance per level            *
 ***********************************************************/
#ifdef UTS_STAT

/* Check that we are not being asked to compile parallel with stats.
 * Parallel stats collection is presently not supported.  */
#if PARALLEL
#error "ERROR: Parallel stats collection is not supported!"
#endif

#define MAXHISTSIZE      2000  // max tree depth in histogram
int    stats     = 1;
int    unbType   = 1;
int    maxHeight = 0;         // maximum depth of tree
double maxImb    = 0;         // maximum imbalance
double minImb    = 1;
double treeImb   =-1;         // Overall imbalance, undefined

int    hist[MAXHISTSIZE+1][2];      // average # nodes per level
double unbhist[MAXHISTSIZE+1][3];   // average imbalance per level

int    *rootSize;             // size of the root's children
double *rootUnb;              // imbalance of root's children

/* Tseng statistics */
int    totalNodes = 0;
double imb_max    = 0;  // % of work in largest child (ranges from 100/n to 100%)
double imb_avg    = 0;
double imb_devmaxavg     = 0; // ( % of work in largest child ) - ( avg work )
double imb_normdevmaxavg = 0; // ( % of work in largest child ) - ( avg work ) / ( 100% - avg work )
#else
int stats   = 0;
int unbType = -1;
#endif


/***********************************************************
 *  Execution Tracing                                      *
 ***********************************************************/

// States recorded per session for trace visualization.
#define SS_WORK    0
#define SS_SEARCH  1
#define SS_IDLE    2
#define SS_OVH     3
/* Session-state identifiers used to index per-thread timing records. */
#define SS_CBOVH   4
#define SS_NSTATES 5

/* session record for session visualization */
struct sessionRecord_t {
  double startTime, endTime;
};
typedef struct sessionRecord_t SessionRecord;

/* steal record for steal visualization */
struct stealRecord_t {
  long int nodeCount;   /* count nodes generated during the session */
  int victimThread;     /* thread from which we stole the work */
};
typedef struct stealRecord_t StealRecord;

/* Store debugging and trace data */
struct metaData_t {
  SessionRecord sessionRecords[SS_NSTATES][20000];   /* session time records */
  StealRecord stealRecords[20000];                   /* steal records */
};
typedef struct metaData_t MetaData;

/* holds text string for debugging info */
char debug_str[1000];


/***********************************************************
 *  StealStack types                                       *
 ***********************************************************/

/***********************************************************
 *  Global shared state                                    *
 ***********************************************************/

// termination detection
// NOTE(review): VOLATILE/SHARED/LOCK_T are project macros that expand
// differently per runtime (UPC/SHMEM/pthreads) — defined outside this chunk.
VOLATILE SHARED int cb_cancel;
VOLATILE SHARED int cb_count;
VOLATILE SHARED int cb_done;
LOCK_T * cb_lock;

/***********************************************************
 *  UTS Implementation Hooks                               *
 ***********************************************************/

// Return a string describing this implementation.
// Indexed by the compile-time COMPILER_TYPE constant; the returned pointer
// refers to a string literal (static storage), so it remains valid.
char * impl_getName() {
  char * name[] = {"Sequential C", "C/OpenMP", "UPC", "SHMEM", "PThreads"};
  return name[COMPILER_TYPE];
}

// construct string with all parameter settings
// Appends a human-readable description of the execution strategy to strBuf
// starting at offset ind; returns the new offset (cumulative sprintf style).
int impl_paramsToStr(char *strBuf, int ind) {
  int n_omp_threads;
  // Query the OpenMP thread count once, from a single thread of a parallel
  // region (omp_get_num_threads() returns 1 outside a parallel region).
#pragma omp parallel
#pragma omp single
  n_omp_threads = omp_get_num_threads();

  ind += sprintf(strBuf+ind, "Execution strategy: ");
  if (PARALLEL) {
    // Total workers = SHMEM PEs x OMP threads per PE.
    ind += sprintf(strBuf+ind, "Parallel search using %d threads total (%d "
            "SHMEM PEs, %d OMP threads per PE)\n",
            npes * n_omp_threads, npes, n_omp_threads);
    if (doSteal) {
      ind += sprintf(strBuf+ind, " Load balance by work stealing, chunk size = %d nodes\n",chunkSize);
      ind += sprintf(strBuf+ind, " CBarrier Interval: %d\n", cbint);
      ind += sprintf(strBuf+ind, " Polling Interval: %d\n", pollint);
    }
    else
      ind += sprintf(strBuf+ind, " No load balancing.\n");
  }
  else
    ind += sprintf(strBuf+ind, "Iterative sequential search\n");

  return ind;
}

// Parse one implementation-specific command-line option.
// param is the "-x" flag, value its argument.
// Returns 0 on a recognized, valid option; nonzero on error.
int impl_parseParam(char *param, char *value) {
  int err = 0;  // Return 0 on a match, nonzero on an error

  switch (param[1]) {
#if (PARALLEL == 1)
    case 'c':
      chunkSize = atoi(value);
      break;
    case 's':
      doSteal = atoi(value);
      if (doSteal != 1 && doSteal != 0)  // only 0 or 1 are meaningful
        err = 1;
      break;
    case 'i':
      cbint = atoi(value);
      break;
#ifdef __BERKELEY_UPC__
    case 'I':
      pollint = atoi(value);
      break;
#endif
#else /* !PARALLEL */
#ifdef UTS_STAT
    case 'u':
      // unbalance measure selector; values > 2 are invalid,
      // negative disables stats collection entirely.
      unbType = atoi(value);
      if (unbType > 2) {
        err = 1;
        break;
      }
      if (unbType < 0)
        stats = 0;
      else
        stats = 1;
      break;
#endif
#endif /* PARALLEL */
    default:
      err = 1;
      break;
  }

  return err;
}

// Print usage text for the implementation-specific options above.
void impl_helpMessage() {
  if (PARALLEL) {
    printf(" -s int zero/nonzero to disable/enable work stealing\n");
    printf(" -c int chunksize for work stealing\n");
    printf(" -i int set cancellable barrier polling interval\n");
#ifdef __BERKELEY_UPC__
    printf(" -I int set working bupc_poll() interval\n");
#endif
#ifdef __PTHREADS__
    printf(" -T int set number of threads\n");
#endif
  } else {
#ifdef UTS_STAT
    printf(" -u int unbalance measure (-1: none; 0: min/size; 1: min/n; 2: max/n)\n");
#else
    printf(" none.\n");
#endif
  }
}

// Abort the whole run with the runtime-appropriate exit mechanism.
void impl_abort(int err) {
#if defined(__UPC__)
  upc_global_exit(err);
#elif defined(_OPENMP)
  exit(err);
#elif defined(_SHMEM)
  exit(err);
#else
  exit(err);
#endif
}


/***********************************************************
 *                                                         *
 *  FUNCTIONS                                              *
 *                                                         *
 ***********************************************************/

/*
 * StealStack
 *    Stack of nodes with sharing at the bottom of the stack
 *    and exclusive access at the top for the "owning" thread
 *    which has affinity to the stack's address space.
 *
 *
 * All operations on the shared portion of the stack
 * must be guarded using the stack-specific lock.
* * Elements move between the shared and exclusive * portion of the stack solely under control of the * owning thread. (ss_release and ss_acquire) * * workAvail is the count of elements in the shared * portion of the stack. It may be read without * acquiring the stack lock, but of course its value * may not be acurate. Idle threads read workAvail in * this speculative fashion to minimize overhead to * working threads. * * Elements can be stolen from the bottom of the shared * portion by non-owning threads. The values are * reserved under lock by the stealing thread, and then * copied without use of the lock (currently space for * reserved values is never reclaimed). * */ /* fatal error */ void ss_error(char *str) { printf("*** [Thread %i] %s\n",GET_THREAD_NUM, str); exit(4); } #ifdef UTS_STAT /* * Statistics, * : number of nodes per level * : imbalanceness of nodes per level * */ void initHist() { int i; for (i=0; i<MAXHISTSIZE; i++){ hist[i][0]=0; hist[i][1]=0; unbhist[i][1]=1; unbhist[i][2]=0; } } void updateHist(Node* c, double unb) { if (c->height<MAXHISTSIZE){ hist[c->height][1]++; hist[c->height][0]+=c->numChildren; unbhist[c->height][0]+=unb; if (unbhist[c->height][1]>unb) unbhist[c->height][1]=unb; if (unbhist[c->height][2]<unb) unbhist[c->height][2]=unb; } else { hist[MAXHISTSIZE][1]++; hist[MAXHISTSIZE][0]+=c->numChildren; } } void showHist(FILE *fp) { int i; fprintf(fp, "depth\tavgNumChildren\t\tnumChildren\t imb\t maxImb\t minImb\t\n"); for (i=0; i<MAXHISTSIZE; i++){ if ((hist[i][0]!=0)&&(hist[i][1]!=0)) fprintf(fp, "%d\t%f\t%d\t %lf\t%lf\t%lf\n", i, (double)hist[i][0]/hist[i][1], hist[i][0], unbhist[i][0]/hist[i][1], unbhist[i][1], unbhist[i][2]); } } double getImb(Node *c) { int i=0; double avg=.0, tmp=.0; double unb=0.0; avg=(double)c->sizeChildren/c->numChildren; for (i=0; i<c->numChildren; i++){ if ((type==BIN)&&(c->pp==NULL)) { if (unbType<2) tmp=min((double)rootSize[i]/avg, avg/(double)rootSize[i]); else tmp=max((double)rootSize[i]/avg, 
avg/(double)rootSize[i]); if (unbType>0) unb+=tmp*rootUnb[i]; else unb+=tmp*rootUnb[i]*rootSize[i]; } else{ if (unbType<2) tmp=min((double)c->size[i]/avg, avg/(double)c->size[i]); else tmp=max((double)c->size[i]/avg, avg/(double)c->size[i]); if (unbType>0) unb+=tmp*c->unb[i]; else unb+=tmp*c->unb[i]*c->size[i]; } } if (unbType>0){ if (c->numChildren>0) unb=unb/c->numChildren; else unb=1.0; } else { if (c->sizeChildren>1) unb=unb/c->sizeChildren; else unb=1.0; } if ((debug & 1) && unb>1) printf("unb>1%lf\t%d\n", unb, c->numChildren); return unb; } void getImb_Tseng(Node *c) { double t_max, t_avg, t_devmaxavg, t_normdevmaxavg; if (c->numChildren==0) { t_avg =0; t_max =0; } else { t_max = (double)c->maxSizeChildren/(c->sizeChildren-1); t_avg = (double)1/c->numChildren; } t_devmaxavg = t_max-t_avg; if (debug & 1) printf("max\t%lf, %lf, %d, %d, %d\n", t_max, t_avg, c->maxSizeChildren, c->sizeChildren, c->numChildren); if (1-t_avg==0) t_normdevmaxavg = 1; else t_normdevmaxavg = (t_max-t_avg)/(1-t_avg); imb_max += t_max; imb_avg += t_avg; imb_devmaxavg += t_devmaxavg; imb_normdevmaxavg +=t_normdevmaxavg; } void updateParStat(Node *c) { double unb; totalNodes++; if (maxHeight<c->height) maxHeight=c->height; unb=getImb(c); maxImb=max(unb, maxImb); minImb=min(unb, minImb); updateHist(c, unb); getImb_Tseng(c); if (c->pp!=NULL){ if ((c->type==BIN)&&(c->pp->pp==NULL)){ rootSize[c->pp->ind]=c->sizeChildren; rootUnb[c->pp->ind]=unb; } else{ c->pp->size[c->pp->ind]=c->sizeChildren; c->pp->unb[c->pp->ind]=unb; } /* update statistics per node*/ c->pp->ind++; c->pp->sizeChildren+=c->sizeChildren; if (c->pp->maxSizeChildren<c->sizeChildren) c->pp->maxSizeChildren=c->sizeChildren; } else treeImb = unb; } #endif /* * Tree Implementation * */ void initNode(Node * child) { child->type = -1; child->height = -1; child->numChildren = -1; // not yet determined #ifdef UTS_STAT if (stats){ int i; child->ind = 0; child->sizeChildren = 1; child->maxSizeChildren = 0; child->pp = NULL; for (i = 0; 
i < MAXNUMCHILDREN; i++){ child->size[i] = 0; child->unb[i] = 0.0; } } #endif } void initRootNode(Node * root, int type) { uts_initRoot(root, type); #ifdef TRACE stealStack[0]->md->stealRecords[0].victimThread = 0; // first session is own "parent session" #endif #ifdef UTS_STAT if (stats){ int i; root->ind = 0; root->sizeChildren = 1; root->maxSizeChildren = 1; root->pp = NULL; if (type != BIN){ for (i=0; i<MAXNUMCHILDREN; i++){ root->size[i] = 0; root->unb[i] =.0; } } else { int rbf = (int) ceil(b_0); rootSize = malloc(rbf*sizeof(int)); rootUnb = malloc(rbf*sizeof(double)); for (i = 0; i < rbf; i++) { rootSize[i] = 0; rootUnb[i] = 0.0; } } } #endif } /* * Generate all children of the parent * * details depend on tree type, node type and shape function * */ void genChildren(Node * parent, Node * child) { int parentHeight = parent->height; int numChildren, childType; #ifdef THREAD_METADATA t_metadata[omp_get_thread_num()].ntasks += 1; #endif thread_info[omp_get_thread_num()].n_nodes++; numChildren = uts_numChildren(parent); childType = uts_childType(parent); // record number of children in parent parent->numChildren = numChildren; // construct children and push onto stack if (numChildren > 0) { int i, j; child->type = childType; child->height = parentHeight + 1; #ifdef UTS_STAT if (stats) { child->pp = parent; // pointer to parent } #endif const unsigned char * parent_state = parent->state.state; unsigned char * child_state = child->state.state; for (i = 0; i < numChildren; i++) { for (j = 0; j < computeGranularity; j++) { // TBD: add parent height to spawn // computeGranularity controls number of rng_spawn calls per node rng_spawn(parent_state, child_state, i); } Node parent = *child; int made_available_for_stealing = 0; if (omp_get_thread_num() == 0 && n_buffered_steals < N_BUFFERED_STEALS) { shmem_set_lock(&steal_buffer_locks[pe]); if (n_buffered_steals < N_BUFFERED_STEALS) { steal_buffer[n_buffered_steals++] = parent; made_available_for_stealing = 1; } 
shmem_clear_lock(&steal_buffer_locks[pe]); } if (!made_available_for_stealing) { #pragma omp task untied firstprivate(parent) if(parent.height < 9) { Node child; initNode(&child); genChildren(&parent, &child); } } } } else { thread_info[omp_get_thread_num()].n_leaves++; } } // causes one or more threads waiting at barrier, if any, // to be released #ifdef TRACE // print session records for each thread (used when trace is enabled) void printSessionRecords() { int i, j, k; double offset; for (i = 0; i < GET_NUM_THREADS; i++) { offset = startTime[i] - startTime[0]; for (j = 0; j < SS_NSTATES; j++) for (k = 0; k < stealStack[i]->entries[j]; k++) { printf ("%d %d %f %f", i, j, stealStack[i]->md->sessionRecords[j][k].startTime - offset, stealStack[i]->md->sessionRecords[j][k].endTime - offset); if (j == SS_WORK) printf (" %d %ld", stealStack[i]->md->stealRecords[k].victimThread, stealStack[i]->md->stealRecords[k].nodeCount); printf ("\n"); } } } #endif // display search statistics void showStats(double elapsedSecs) { int i; int tnodes = 0, tleaves = 0, trel = 0, tacq = 0, tsteal = 0, tfail= 0; int mdepth = 0, mheight = 0; double twork = 0.0, tsearch = 0.0, tidle = 0.0, tovh = 0.0, tcbovh = 0.0; // // combine measurements from all threads // for (i = 0; i < GET_NUM_THREADS; i++) { // tnodes += stealStack[i]->nNodes; // tleaves += stealStack[i]->nLeaves; // trel += stealStack[i]->nRelease; // tacq += stealStack[i]->nAcquire; // tsteal += stealStack[i]->nSteal; // tfail += stealStack[i]->nFail; // twork += stealStack[i]->time[SS_WORK]; // tsearch += stealStack[i]->time[SS_SEARCH]; // tidle += stealStack[i]->time[SS_IDLE]; // tovh += stealStack[i]->time[SS_OVH]; // tcbovh += stealStack[i]->time[SS_CBOVH]; // mdepth = max(mdepth, stealStack[i]->maxStackDepth); // mheight = max(mheight, stealStack[i]->maxTreeDepth); // } // if (trel != tacq + tsteal) { // printf("*** error! 
total released != total acquired + total stolen\n"); // } // uts_showStats(GET_NUM_THREADS, chunkSize, elapsedSecs, n_nodes, n_leaves, mheight); // // if (verbose > 1) { // if (doSteal) { // printf("Total chunks released = %d, of which %d reacquired and %d stolen\n", // trel, tacq, tsteal); // printf("Failed steal operations = %d, ", tfail); // } // // printf("Max stealStack size = %d\n", mdepth); // printf("Avg time per thread: Work = %.6f, Search = %.6f, Idle = %.6f\n", (twork / GET_NUM_THREADS), // (tsearch / GET_NUM_THREADS), (tidle / GET_NUM_THREADS)); // printf(" Overhead = %6f, CB_Overhead = %6f\n\n", (tovh / GET_NUM_THREADS), // (tcbovh/GET_NUM_THREADS)); // } // // // per thread execution info // if (verbose > 2) { // for (i = 0; i < GET_NUM_THREADS; i++) { // printf("** Thread %d\n", i); // printf(" # nodes explored = %d\n", stealStack[i]->nNodes); // printf(" # chunks released = %d\n", stealStack[i]->nRelease); // printf(" # chunks reacquired = %d\n", stealStack[i]->nAcquire); // printf(" # chunks stolen = %d\n", stealStack[i]->nSteal); // printf(" # failed steals = %d\n", stealStack[i]->nFail); // printf(" maximum stack depth = %d\n", stealStack[i]->maxStackDepth); // printf(" work time = %.6f secs (%d sessions)\n", // stealStack[i]->time[SS_WORK], stealStack[i]->entries[SS_WORK]); // printf(" overhead time = %.6f secs (%d sessions)\n", // stealStack[i]->time[SS_OVH], stealStack[i]->entries[SS_OVH]); // printf(" search time = %.6f secs (%d sessions)\n", // stealStack[i]->time[SS_SEARCH], stealStack[i]->entries[SS_SEARCH]); // printf(" idle time = %.6f secs (%d sessions)\n", // stealStack[i]->time[SS_IDLE], stealStack[i]->entries[SS_IDLE]); // printf(" wakeups = %d, false wakeups = %d (%.2f%%)", // stealStack[i]->wakeups, stealStack[i]->falseWakeups, // (stealStack[i]->wakeups == 0) ? 
0.00 : ((((double)stealStack[i]->falseWakeups)/stealStack[i]->wakeups)*100.0)); // printf("\n"); // } // } // // #ifdef TRACE // printSessionRecords(); // #endif // // // tree statistics output to stat.txt, if requested // #ifdef UTS_STAT // if (stats) { // FILE *fp; // char * tmpstr; // char strBuf[5000]; // int ind = 0; // // fp = fopen("stat.txt", "a+w"); // fprintf(fp, "\n------------------------------------------------------------------------------------------------------\n"); // ind = uts_paramsToStr(strBuf, ind); // ind = impl_paramsToStr(strBuf, ind); // //showParametersStr(strBuf); // fprintf(fp, "%s\n", strBuf); // // fprintf(fp, "\nTotal nodes = %d\n", totalNodes); // fprintf(fp, "Max depth = %d\n\n", maxHeight); // fprintf(fp, "Tseng ImbMeasure(overall)\n max:\t\t%lf \n avg:\t\t%lf \n devMaxAvg:\t %lf\n normDevMaxAvg: %lf\t\t\n\n", // imb_max/totalNodes, imb_avg/totalNodes, imb_devmaxavg/totalNodes, // imb_normdevmaxavg/totalNodes); // // switch (unbType){ // case 0: tmpstr = "(min imb weighted by size)"; break; // case 1: tmpstr = "(min imb not weighted by size)"; break; // case 2: tmpstr = "(max imb not weighted by size)"; break; // default: tmpstr = "(?unknown measure)"; break; // } // fprintf(fp, "ImbMeasure:\t%s\n Overall:\t %lf\n Max:\t\t%lf\n Min:\t\t%lf\n\n", // tmpstr, treeImb, minImb, maxImb); // showHist(fp); // fprintf(fp, "\n------------------------------------------------------------------------------------------------------\n\n\n"); // fclose(fp); // } // #endif } /* Main() function for: Sequential, OpenMP, UPC, and Shmem * * Notes on execution model: * - under openMP, global vars are all shared * - under UPC, global vars are private unless explicitly shared * - UPC is SPMD starting with main, OpenMP goes SPMD after * parsing parameters */ int main(int argc, char *argv[]) { #ifdef THREAD_METADATA memset(t_metadata, 0x00, MAX_OMP_THREADS * sizeof(thread_metadata)); #endif memset(thread_info, 0x00, MAX_OMP_THREADS * 
sizeof(per_thread_info)); memset(steal_buffer_locks, 0x00, MAX_SHMEM_THREADS * sizeof(long)); shmem_init(); pe = shmem_my_pe(); npes = shmem_n_pes(); /* determine benchmark parameters (all PEs) */ uts_parseParams(argc, argv); #ifdef UTS_STAT if (stats) { initHist(); } #endif double t1, t2, et; /* show parameter settings */ if (pe == 0) { uts_printParams(); } Node root; initRootNode(&root, type); shmem_barrier_all(); /* time parallel search */ t1 = uts_wctime(); int n_omp_threads; /********** SPMD Parallel Region **********/ #pragma omp parallel { #pragma omp master { int first = 1; n_omp_threads = omp_get_num_threads(); assert(n_omp_threads <= MAX_OMP_THREADS); Node child; retry: initNode(&child); if (first) { if (pe == 0) { genChildren(&root, &child); } } else { genChildren(&root, &child); } first = 0; #pragma omp taskwait if (n_buffered_steals > 0) { shmem_set_lock(&steal_buffer_locks[pe]); if (n_buffered_steals > 0) { n_buffered_steals--; memcpy(&root, &steal_buffer[n_buffered_steals], sizeof(root)); shmem_clear_lock(&steal_buffer_locks[pe]); goto retry; } else { shmem_clear_lock(&steal_buffer_locks[pe]); } } const int got_more_work = remote_steal(&root); if (got_more_work == 1) { goto retry; } } } shmem_barrier_all(); t2 = uts_wctime(); et = t2 - t1; int i; for (i = 0; i < MAX_OMP_THREADS; i++) { n_nodes += thread_info[i].n_nodes; n_leaves += thread_info[i].n_leaves; } shmem_barrier_all(); if (pe != 0) { shmem_int_add(&n_nodes, n_nodes, 0); shmem_int_add(&n_leaves, n_leaves, 0); } shmem_barrier_all(); if (pe == 0) { showStats(et); } /********** End Parallel Region **********/ #ifdef THREAD_METADATA int p; for (p = 0; p < npes; p++) { if (p == pe) { printf("\n"); int i; for (i = 0; i < n_omp_threads; i++) { printf("PE %d, thread %d: %lu tasks\n", p, i, t_metadata[i].ntasks); } } shmem_barrier_all(); } #endif shmem_finalize(); return 0; }
Example_target_mapper.2.c
/* * @@name: target_mapper_map.2.c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success * @@version: omp_5.0 */ #include <stdio.h> // N MUST BE EVEN #define N 100 typedef struct dzmat { double r_m[N][N]; double i_m[N][N]; } dzmat_t; #pragma omp declare mapper( top_id: dzmat_t v) \ map(v.r_m[0:N/2][0:N], \ v.i_m[0:N/2][0:N] ) #pragma omp declare mapper(bottom_id: dzmat_t v) \ map(v.r_m[N/2:N/2][0:N], \ v.i_m[N/2:N/2][0:N] ) void dzmat_init(dzmat_t *z, int is, int ie, int n); //initialization void host_add( dzmat_t *a, dzmat_t *b, dzmat_t *c, int n); //matrix add: c=a+b int main() { dzmat_t a,b,c; int is,ie; is=0; ie=N/2-1; //top N/2 rows on device 0 #pragma omp target map(mapper(top_id), tofrom: a,b) device(0) \ firstprivate(is,ie) nowait { dzmat_init(&a,is,ie,N); dzmat_init(&b,is,ie,N); } is=N/2; ie=N-1; //bottom N/2 rows on device 1 #pragma omp target map(mapper(bottom_id), tofrom: a,b) device(1) \ firstprivate(is,ie) nowait { dzmat_init(&a,is,ie,N); dzmat_init(&b,is,ie,N); } #pragma omp taskwait host_add(&a,&b,&c,N); }
pchetrf_aasen.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzhetrf_aasen.c, normal z -> c, Fri Sep 28 17:38:12 2018 * **/ #include <math.h> #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <plasma_core_blas.h> #define A(m, n) ((plasma_complex32_t*)plasma_tile_addr(A, (m), (n))) #define L(m, n) ((plasma_complex32_t*)plasma_tile_addr(A, (m), (n)-1)) #define U(m, n) ((plasma_complex32_t*)plasma_tile_addr(A, (m)-1, (n))) #define T(m, n) ((plasma_complex32_t*)(W4((m)+((m)-(n))*A.mt))) #define Tgb(m, n) ((plasma_complex32_t*)plasma_tile_addr(T, (m), (n))) // W(m): nb*nb used to compute T(k,k) #define W(m) ((plasma_complex32_t*)plasma_tile_addr(W, (m), 0)) // W2(m): mt*(nb*nb) to store H #define W2(m) ((plasma_complex32_t*)plasma_tile_addr(W2, (m), 0)) // W3(m): mt*(nb*nb) used to form T(k,k) #define W3(m) ((plasma_complex32_t*)plasma_tile_addr(W3, (m), 0)) // W4(m): mt*(nb*nb) used to store off-diagonal T #define W4(m) ((plasma_complex32_t*)plasma_tile_addr(W4, (m), 0)) // W5(m): wmt used to update L(:,k) #define W5(m) ((plasma_complex32_t*)plasma_tile_addr(W5, (m), 0)) #define H(m, n) (uplo == PlasmaLower ? W2((m)) : W2((n))) #define IPIV(i) (ipiv + (i)*(A.mb)) /***************************************************************************//** * Parallel tile LDLt factorization. * @see plasma_omp_chetrf_aasen ******************************************************************************/ void plasma_pchetrf_aasen(plasma_enum_t uplo, plasma_desc_t A, int *ipiv, plasma_desc_t T, plasma_desc_t W, plasma_sequence_t *sequence, plasma_request_t *request) { // Return if failed sequence. if (sequence->status != PlasmaSuccess) return; // Read parameters from the context. 
plasma_context_t *plasma = plasma_context_self(); int ib = plasma->ib; int wmt = W.mt-(1+4*A.mt); // Creaet views for the workspaces plasma_desc_t W2 = plasma_desc_view(W, A.mb, 0, A.mt*A.mb, A.nb); plasma_desc_t W3 = plasma_desc_view(W, (1+1*A.mt)*A.mb, 0, A.mt*A.mb, A.nb); plasma_desc_t W4 = plasma_desc_view(W, (1+2*A.mt)*A.mb, 0, 2*A.mt*A.mb, A.nb); plasma_desc_t W5 = plasma_desc_view(W, (1+4*A.mt)*A.mb, 0, wmt*A.mb, A.nb); //============== // PlasmaLower //============== // NOTE: In old PLASMA, we used priority. if (uplo == PlasmaLower) { for (int k = 0; k < A.mt; k++) { int nvak = plasma_tile_nview(A, k); int mvak = plasma_tile_mview(A, k); int ldak = plasma_tile_mmain(A, k); int ldtk = plasma_tile_mmain(W4, k); // -- computing offdiagonals H(1:k-1, k) -- // for (int m=1; m < k; m++) { int mvam = plasma_tile_mview(A, m); int ldtm = plasma_tile_mmain(W4, m); plasma_core_omp_cgemm( PlasmaNoTrans, PlasmaConjTrans, mvam, mvak, mvam, 1.0, T(m, m), ldtm, L(k, m), ldak, 0.0, H(m, k), A.mb, sequence, request); if (m > 1) { plasma_core_omp_cgemm( PlasmaNoTrans, PlasmaConjTrans, mvam, mvak, A.mb, 1.0, T(m, m-1), ldtm, L(k, m-1), ldak, 1.0, H(m, k), A.mb, sequence, request); } int mvamp1 = plasma_tile_mview(A, m+1); int ldtmp1 = plasma_tile_mmain(W4, m+1); plasma_core_omp_cgemm( PlasmaConjTrans, PlasmaConjTrans, mvam, mvak, mvamp1, 1.0, T(m+1, m), ldtmp1, L(k, m+1), ldak, 1.0, H(m, k), A.mb, sequence, request); } // ---- end of computing H(1:(k-1),k) -- // // -- computing diagonal T(k, k) -- // plasma_complex32_t beta; if (k > 1) { int num = k-1; for (int m = 1; m < k; m++) { int mvam = plasma_tile_mview(A, m); int id = (m-1) % num; if (m < num+1) beta = 0.0; else beta = 1.0; plasma_core_omp_cgemm( PlasmaNoTrans, PlasmaNoTrans, mvak, mvak, mvam, -1.0, L(k, m), ldak, H(m, k), A.mb, beta, W3(id), A.mb, sequence, request); } // all-reduce W3 using a binary tree // // NOTE: Old PLASMA had an option to reduce in a set of tiles // // num_players: number of players int 
num_players = num; // num_rounds : height of tournament int num_rounds = ceil( log10((float)num_players)/log10(2.0) ); // base: intervals between brackets int base = 2; for (int round = 1; round <= num_rounds; round++) { int num_brackets = num_players / 2; // number of brackets for (int bracket = 0; bracket < num_brackets; bracket++) { // first contender int m1 = base*bracket; // second contender int m2 = m1+base/2; plasma_core_omp_cgeadd( PlasmaNoTrans, mvak, mvak, 1.0, W3(m2), A.mb, 1.0, W3(m1), A.mb, sequence, request); } num_players = ceil( ((float)num_players)/2.0 ); base = 2*base; } plasma_core_omp_clacpy( PlasmaLower, PlasmaNoTrans, mvak, mvak, A(k, k), ldak, T(k, k), ldtk, sequence, request); plasma_core_omp_cgeadd( PlasmaNoTrans, mvak, mvak, 1.0, W3(0), A.mb, 1.0, T(k, k), ldtk, sequence, request); } else { // k == 0 or 1 plasma_core_omp_clacpy( PlasmaLower, PlasmaNoTrans, mvak, mvak, A(k, k), ldak, T(k, k), ldtk, sequence, request); // expanding to full matrix plasma_core_omp_clacpy( PlasmaLower, PlasmaConjTrans, mvak, mvak, T(k, k), ldtk, T(k, k), ldtk, sequence, request); } if (k > 0) { if (k > 1) { plasma_core_omp_cgemm( PlasmaNoTrans, PlasmaNoTrans, mvak, A.mb, mvak, 1.0, L(k, k), ldak, T(k, k-1), ldtk, 0.0, W(0), A.mb, sequence, request); plasma_core_omp_cgemm( PlasmaNoTrans, PlasmaConjTrans, mvak, mvak, A.mb, -1.0, W(0), A.mb, L(k, k-1), ldak, 1.0, T(k, k), ldtk, sequence, request); } // - symmetrically solve with L(k,k) // plasma_core_omp_chegst( 1, PlasmaLower, mvak, T(k, k), ldtk, L(k, k), ldak, sequence, request); // expand to full matrix plasma_core_omp_clacpy( PlasmaLower, PlasmaConjTrans, mvak, mvak, T(k, k), ldtk, T(k, k), ldtk, sequence, request); } // computing H(k, k) // beta = 0.0; if (k > 1) { plasma_core_omp_cgemm( PlasmaNoTrans, PlasmaConjTrans, mvak, mvak, A.nb, 1.0, T(k, k-1), ldtk, L(k, k-1), ldak, 0.0, H(k, k), A.mb, sequence, request); beta = 1.0; } // computing the (k+1)-th column of L // if (k+1 < A.nt) { int ldakp1 = 
plasma_tile_mmain(A, k+1); if (k > 0) { // finish computing H(k, k) // plasma_core_omp_cgemm( PlasmaNoTrans, PlasmaConjTrans, mvak, mvak, mvak, 1.0, T(k, k), ldtk, L(k, k), ldak, beta, H(k, k), A.mb, sequence, request); // computing the (k+1)-th column of L // // - update with the previous column if (A.mt-k < plasma->max_threads && k > 0) { int num = imin(k, wmt/(A.mt-k-1)); // workspace per row for (int n = 1; n <= k; n++) { // update A(k+1:mt-1, k) using L(:,n) // plasma_complex32_t *a1, *a2, *b, *c; int ma1 = (A.mt-k)*A.mb; int ma2 = plasma_tile_mmain(A, A.mt-1); int mc = (A.mt-(k+1))*A.mb; int na = plasma_tile_nmain(A, k); a1 = L(k, n); // we need only L(k+1:mt-1, n) a2 = L(k, A.mt-1); // left-over tile b = H(n, k); c = W5(((n-1)%num)*(A.mt-k-1)); int mvan = plasma_tile_mview(A, n); #pragma omp task depend(in:a1[0:ma1*na]) \ depend(in:a2[0:ma2*na]) \ depend(in:b[0:A.mb*mvak]) \ depend(inout:c[0:mc*A.mb]) { for (int m = k+1; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); int id = (m-k-1)+((n-1)%num)*(A.mt-k-1); if (n < num+1) beta = 0.0; else beta = 1.0; #pragma omp task { plasma_core_cgemm( PlasmaNoTrans, PlasmaNoTrans, mvam, mvak, mvan, -1.0, L(m, n), ldam, H(n, k), A.mb, beta, W5(id), A.mb); } } #pragma omp taskwait } } // accumerate within workspace using a binary tree // num_players: number of players int num_players = num; // num_rounds: height of tournament int num_rounds = ceil( log10((float)num_players)/log10(2.0) ); // base: intervals between brackets int base = 2; for (int round = 1; round <= num_rounds; round++) { int num_brackets = num_players / 2; // number of brackets for (int bracket = 0; bracket < num_brackets; bracket++) { // first contender int m1 = base*bracket; // second contender int m2 = m1+base/2; plasma_complex32_t *c1, *c2; int mc = (A.mt-(k+1))*A.mb; c1 = W5(m1*(A.mt-k-1)); c2 = W5(m2*(A.mt-k-1)); #pragma omp task depend(in:c2[0:mc*A.mb]) \ depend(inout:c1[0:mc*A.mb]) { for (int m = k+1; m < 
A.mt; m++) { int mvam = plasma_tile_mview(A, m); #pragma omp task { plasma_core_cgeadd( PlasmaNoTrans, mvam, mvak, 1.0, W5((m-k-1)+m2*(A.mt-k-1)), A.mb, 1.0, W5((m-k-1)+m1*(A.mt-k-1)), A.mb); } } #pragma omp taskwait } } num_players = ceil( ((float)num_players)/2.0 ); base = 2*base; } // accumelate into A(k+1:mt-1, k) { plasma_complex32_t *c1; plasma_complex32_t *c2_in; plasma_complex32_t *c3_in; plasma_complex32_t *c2_out; int mc = (A.mt-(k+1))*A.mb; int mc2_in = (A.mt-k)*A.mb; int mc3_in = plasma_tile_mmain(A, A.mt-1); int mc2_out = (A.mt-(k+1))*A.mb; int nc2 = plasma_tile_nmain(A, k); c1 = W5(0); c2_in = A(k, k); // we write only A(k+1,k), but dependency from sym-swap c3_in = A(A.mt-1, k); // left-over tile c2_out = A(k+1, k); #pragma omp task depend(in:c1[0:mc*A.mb]) \ depend(in:c2_in[0:mc2_in*nc2]) \ depend(in:c3_in[0:mc3_in*nc2]) \ depend(out:c2_out[0:mc2_out*nc2]) { for (int m = k+1; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); #pragma omp task { plasma_core_cgeadd( PlasmaNoTrans, mvam, mvak, 1.0, W5(m-k-1), A.mb, 1.0, A(m, k), ldam); } } #pragma omp taskwait } } } else { for (int n = 1; n <= k; n++) { // update L(:,k+1) using L(:,n) // plasma_complex32_t *a1, *a2; plasma_complex32_t *b; plasma_complex32_t *c1_in, *c2_in; plasma_complex32_t *c_out; int ma1 = (A.mt-k)*A.mb; int ma2 = plasma_tile_mmain(A, A.mt-1); int mc1_in = (A.mt-k)*A.mb; int mc2_in = plasma_tile_mmain(A, A.mt-1); int mc_out = (A.mt-(k+1))*A.mb; int na = plasma_tile_nmain(A, k); int nc = plasma_tile_nmain(A, k); a1 = L(k, n); // we read only L(k+1:mt-1, n), dependency from row-swap a2 = L(A.mt-1, n); // left-over tile b = H(n, k); c1_in = A(k, k); // we write only A(k+1,k), but dependency from sym-swap c2_in = A(A.mt-1, k); // left-over tile c_out = A(k+1, k); int mvan = plasma_tile_mview(A, n); #pragma omp task depend(in:a1[0:ma1*na]) \ depend(in:a2[0:ma2*na]) \ depend(in:b[0:A.mb*mvak]) \ depend(in:c1_in[0:mc1_in*nc]) \ 
depend(in:c2_in[0:mc2_in*nc]) \ depend(out:c_out[0:mc_out*nc]) { for (int m = k+1; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); #pragma omp task { plasma_core_cgemm( PlasmaNoTrans, PlasmaNoTrans, mvam, mvak, mvan, -1.0, L(m, n), ldam, H(n, k), A.mb, 1.0, A(m, k), ldam); } } #pragma omp taskwait } } } } // end of if (k > 0) // ============================= // // == PLASMA LU panel == // // ============================= // // -- compute LU of the panel -- // plasma_complex32_t *a1, *a2; a1 = L(k+1, k+1); a2 = L(A.mt-1, k+1); int mlkk = A.m - (k+1)*A.mb; // dimension int ma1 = (A.mt-(k+1)-1)*A.mb; int ma2 = plasma_tile_mmain(A, A.mt-1); int na = plasma_tile_nmain(A, k); int k1 = 1+(k+1)*A.nb; int k2 = imin(mlkk, mvak)+(k+1)*A.nb; int num_panel_threads = imin(plasma->max_panel_threads, A.mt-(k+1)); #pragma omp task depend(inout:a1[0:ma1*na]) \ depend(inout:a2[0:ma2*na]) \ depend(out:ipiv[k1-1:k2]) { volatile int *max_idx = (int*)malloc(num_panel_threads*sizeof(int)); if (max_idx == NULL) plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory); volatile plasma_complex32_t *max_val = (plasma_complex32_t*)malloc(num_panel_threads*sizeof( plasma_complex32_t)); if (max_val == NULL) plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory); volatile int info = 0; plasma_barrier_t barrier; plasma_barrier_init(&barrier); if (sequence->status == PlasmaSuccess) { for (int rank = 0; rank < num_panel_threads; rank++) { #pragma omp task shared(barrier) { plasma_desc_t view = plasma_desc_view(A, (k+1)*A.mb, k*A.nb, mlkk, mvak); plasma_core_cgetrf(view, IPIV(k+1), ib, rank, num_panel_threads, max_idx, max_val, &info, &barrier); if (info != 0) plasma_request_fail(sequence, request, (k+1)*A.mb+info); } } } #pragma omp taskwait free((void*)max_idx); free((void*)max_val); { for (int i = 0; i < imin(mlkk, mvak); i++) { IPIV(k+1)[i] += (k+1)*A.mb; } } } // ============================== // // == end of PLASMA LU panel == // // 
============================== // // -- apply pivoting to previous columns of L -- // for (int n = 1; n < k+1; n++) { ma1 = (A.mt-k-1)*A.mb; ma2 = plasma_tile_mmain(A, A.mt-1); na = plasma_tile_nmain(A, n-1); a1 = L(k+1, n); a2 = L(A.mt-1, n); #pragma omp task depend(in:ipiv[(k1-1):k2]) \ depend(inout:a1[0:ma1*na]) \ depend(inout:a2[0:ma2*na]) { if (sequence->status == PlasmaSuccess) { plasma_desc_t view = plasma_desc_view(A, 0, (n-1)*A.nb, A.m, na); plasma_core_cgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1); } } } // computing T(k+1, k) // int mvakp1 = plasma_tile_mview(A, k+1); int ldak_n = plasma_tile_nmain(A, k); int ldtkp1 = plasma_tile_mmain(W4, k+1); // copy upper-triangular part of L(k+1,k+1) to T(k+1,k) // and then zero it out plasma_core_omp_clacpy( PlasmaUpper, PlasmaNoTrans, mvakp1, mvak, L(k+1, k+1), ldakp1, T(k+1, k ), ldtkp1, sequence, request); plasma_core_omp_claset( PlasmaUpper, ldakp1, ldak_n, 0, 0, mvakp1, mvak, 0.0, 1.0, L(k+1, k+1)); if (k > 0) { plasma_core_omp_ctrsm( PlasmaRight, PlasmaLower, PlasmaConjTrans, PlasmaUnit, mvakp1, mvak, 1.0, L(k, k), ldak, T(k+1, k), ldtkp1, sequence, request); } // -- symmetrically apply pivoting to trailing A -- // { int mnt1, mnt2; mnt2 = A.mt-1-(k+1); ma2 = plasma_tile_mmain(A, A.mt-1); mnt1 = (mnt2*(mnt2+1))/2; // # of remaining tiles in a1 mnt1 *= A.mb*A.mb; mnt2 *= A.mb*ma2; // a2 mnt2 += ma2*ma2; // last diagonal a1 = A(k+1, k+1); a2 = A(A.mt-1, k+1); int num_swap_threads = imin(plasma->max_panel_threads, A.mt-(k+1)); #pragma omp task depend(in:ipiv[(k1-1):k2]) \ depend(inout:a1[0:mnt1]) \ depend(inout:a2[0:mnt2]) { plasma_barrier_t barrier; plasma_barrier_init(&barrier); for (int rank = 0; rank < num_swap_threads; rank++) { #pragma omp task shared(barrier) { plasma_core_cheswp(rank, num_swap_threads, PlasmaLower, A, k1, k2, ipiv, 1, &barrier); } } #pragma omp taskwait } } } // copy T(k+1, k) to T(k, k+1) for cgbtrf, // forcing T(k, k) and T(k+1, k) tiles are ready plasma_complex32_t *Tin10 = NULL; 
plasma_complex32_t *Tin11 = NULL; plasma_complex32_t *Tin21 = NULL; plasma_complex32_t *Tout01 = NULL; plasma_complex32_t *Tout11 = NULL; plasma_complex32_t *Tout21 = NULL; plasma_complex32_t *Tout = NULL; int km1 = imax(0, k-1); int kp1 = imin(k+1, A.mt-1); int ldtkm1 = plasma_tile_mmain(W4, km1); int ldtkp1 = plasma_tile_mmain(W4, kp1); int nvakp1 = plasma_tile_nview(A, kp1); Tin10 = T(k, km1); Tin11 = T(k, k); Tin21 = T(kp1, k); Tout01 = Tgb(km1, k); Tout11 = Tgb(k, k); Tout21 = Tgb(kp1, k); Tout = Tgb(imax(0, k-T.kut+1), k); #pragma omp task depend(in:Tin10[0:A.mb*ldtk]) \ depend(in:Tin11[0:nvak*ldtk]) \ depend(in:Tin21[0:nvakp1*ldtkp1]) \ depend(out:Tout01[0:nvak*ldtkm1]) \ depend(out:Tout11[0:nvak*ldtk]) \ depend(out:Tout21[0:nvak*ldtkp1]) \ depend(out:Tout[0:nvak*ldtkp1]) { plasma_core_clacpy( PlasmaGeneral, PlasmaNoTrans, mvak, mvak, T(k, k), ldtk, Tgb(k, k), ldtk); if (k+1 < T.nt) { int mvakp1 = plasma_tile_mview(A, k+1); plasma_core_clacpy( PlasmaGeneral, PlasmaNoTrans, mvakp1, mvak, T(kp1, k), ldtkp1, Tgb(kp1, k), ldtkp1); } if (k > 0) { int nvakm1 = plasma_tile_nview(A, k-1); plasma_core_clacpy( PlasmaGeneral, PlasmaConjTrans, mvak, nvakm1, T(k, km1), ldtk, Tgb(km1, k), ldtkm1); } } } } //============== // PlasmaUpper //============== else { // TODO: Upper } }
GB_unop__expm1_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__expm1_fp64_fp64)
// op(A') function:  GB (_unop_tran__expm1_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = expm1 (aij)

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = expm1 (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = expm1 (z) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXPM1 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies expm1 entrywise to the anz entries of Ax, writing the results into
// Cx, using up to nthreads OpenMP threads.  Returns GrB_NO_VALUE when the
// operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__expm1_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = expm1 (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = expm1 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in the textual template
// GB_unop_transpose.c, which is specialized here by the GB_* macros above.
GrB_Info GB (_unop_tran__expm1_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
parallel_sort.h
#include "utility.h"

#ifndef PARALLEL_SORT_STL_H
#define PARALLEL_SORT_STL_H

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iterator>
#include <utility>
#include <vector>

#include <omp.h>

namespace internal
{
    // Recursion-depth statistic.  It is updated from concurrently executing
    // OpenMP tasks, so every increment below goes through "#pragma omp atomic"
    // to avoid a data race.
    // NOTE(review): defining (not merely declaring) this variable in a header
    // creates one copy per translation unit; consider C++17 "inline" if this
    // header is included from more than one .cpp -- TODO confirm.
    std::size_t g_depth = 0L;

    // Ranges not larger than this are handled with insertion sort.
    const std::size_t cutoff_low = 100;
    // Ranges larger than this are split into concurrent OpenMP tasks.
    const std::size_t cutoff_high = 1000;

    // Rearrange *_First, *_Mid and *_Last in place so that afterwards the
    // three values are mutually ordered with respect to `compare`.
    template<class BidirIt, class _Pred>
    void sort3v(BidirIt _First, BidirIt _Mid, BidirIt _Last, _Pred compare)
    {
        if (compare(*_Last, *_First))
            std::iter_swap(_First, _Last);
        if (compare(*_Mid, *_First))
            std::iter_swap(_First, _Mid);
        if (compare(*_Last, *_Mid))
            std::iter_swap(_Mid, _Last);
    }

    // Return an iterator to the median of the three values *_First, *_Mid and
    // *_Last.  (FIX: the original comments claimed each branch returned the
    // "less than or equal" value; each branch in fact selects the value that
    // lies BETWEEN the other two.  This helper intentionally uses raw
    // operator<=/>= rather than the user predicate, as the callers expect.)
    template<class BidirIt>
    BidirIt med3v(BidirIt _First, BidirIt _Mid, BidirIt _Last)
    {
        if ((*_First <= *_Mid && *_First >= *_Last) ||
            (*_First <= *_Last && *_First >= *_Mid))
            return _First;      // *_First lies between the other two values
        else if ((*_Mid <= *_First && *_Mid >= *_Last) ||
                 (*_Mid <= *_Last && *_Mid >= *_First))
            return _Mid;        // *_Mid lies between the other two values
        else if ((*_Last <= *_First && *_Last >= *_Mid) ||
                 (*_Last <= *_Mid && *_Last >= *_First))
            return _Last;       // *_Last lies between the other two values
        return _First;
    }

    // Median-of-nine pivot selection: probe the range at nine equidistant
    // positions, take the median-of-three of each triple, then the median of
    // those three medians.  Falls back to plain median-of-three for ranges
    // shorter than 27 elements.
    template<class BidirIt>
    BidirIt med9v(BidirIt _First, BidirIt _Mid, BidirIt _Last)
    {
        std::size_t _Size = 0L;
        std::vector<BidirIt> median(9);
        if ((_Size = (std::distance(_First, _Last) / 9)) >= 3)
        {
            for (int index = 0; index < 9; index++)
                median[index] = _First + _Size * index;
            BidirIt _Med1 = med3v(median[0], median[1], median[2]);
            BidirIt _Med2 = med3v(median[3], median[4], median[5]);
            BidirIt _Med3 = med3v(median[6], median[7], median[8]);
            return med3v(_Med1, _Med2, _Med3);
        }
        return med3v(_First, _Mid, _Last);
    }

    // One odd-even transposition pass over [_First, _Last): exchange adjacent
    // out-of-order items as a cheap pre-sorting heuristic.
    // BUG FIX: the original ran a single adjacent-swap pass inside
    // "#pragma omp parallel for" over an iterator loop with a "!=" end
    // condition (not an OpenMP canonical loop) and raced on neighbouring
    // elements.  Splitting the pass into an even-pair phase and an odd-pair
    // phase makes every parallel iteration touch a disjoint pair.
    template<class BidirIt, class _Pred>
    void adjacent_sort(BidirIt _First, BidirIt _Last, _Pred compare)
    {
        const std::ptrdiff_t n = std::distance(_First, _Last);
        for (std::ptrdiff_t phase = 0; phase < 2; phase++)
        {
            #pragma omp parallel for
            for (std::ptrdiff_t i = phase; i < n - 1; i += 2)
            {
                BidirIt it = _First + i;
                if (!compare(*it, *(it + 1)) && *it != *(it + 1))
                    std::iter_swap(it, it + 1);
            }
        }
    }

    // Cocktail-shaker style pre-pass over the inclusive range
    // [_First, _Last]: swap out-of-order items taken from the two ends,
    // walking inwards until the middle is reached or the ends are in order.
    template<class BidirIt, class _Pred>
    void shaker_sort(BidirIt _First, BidirIt _Last, _Pred compare)
    {
        BidirIt _LeftIt = _First, _RightIt = _Last;
        BidirIt _MidIt = _LeftIt + (_RightIt - _LeftIt) / 2;
        while (!compare(*_LeftIt, *_RightIt) && _RightIt > _MidIt)
            std::iter_swap(_LeftIt++, _RightIt--);
    }

    // Insert the value at *(_RevIt + 1) into the already-sorted prefix ending
    // at _RevIt, shifting greater elements one slot to the right.
    // (_Last is unused but kept for interface compatibility with callers.)
    template<class BidirIt, class _Pred>
    void do_insertion(BidirIt _First, BidirIt _RevIt, BidirIt _Last,
                      _Pred compare)
    {
        (void)_Last;
        typename std::iterator_traits<BidirIt>::value_type
            _Value = *(_RevIt + 1);
        while (compare(_Value, *_RevIt))
        {
            // Shift the greater element up and drop the value one slot lower.
            *(_RevIt + 1) = *_RevIt;
            *_RevIt = _Value;
            if (_RevIt <= _First)
                break;
            _RevIt--;
        }
    }

    // Insertion sort of the inclusive range [_First, _Last].
    // BUG FIX: the original manually unrolled the loop four times, which could
    // advance _LeftIt past _RightIt between the unrolled calls and made
    // do_insertion read and write one element past the end of the range.
    template<class RandomIt, class _Pred>
    void insertion_sort(RandomIt _First, RandomIt _Last, _Pred compare)
    {
        for (RandomIt _LeftIt = _First + 1; _LeftIt <= _Last; ++_LeftIt)
            do_insertion(_First, _LeftIt - 1, _Last, compare);
    }

    // Three-way quicksort of the inclusive range [_First, _Last] around a
    // median-of-nine pivot; large sub-ranges are sorted as OpenMP tasks.
    template<class RanIt, class _Pred>
    void _qs3w(RanIt _First, RanIt _Last, _Pred compare)
    {
        if (_First >= _Last)
            return;
        if (std::distance(_First, _Last) == 1)
        {
            // BUG FIX: the original compared with the raw ">" operator here,
            // ignoring the user-supplied predicate.
            if (compare(*_Last, *_First))
                std::iter_swap(_First, _Last);
            return;
        }
        std::size_t _Size = 0L;
        // Tasks may execute this function concurrently: update atomically.
        #pragma omp atomic
        g_depth++;
        if ((_Size = std::distance(_First, _Last)) > 0)
        {
            RanIt _LeftIt = _First, _RightIt = _Last,
                  _MidIt = _First + (_Last - _First) / 2;
            bool is_swapped_left = false, is_swapped_right = false;
            // Pivot = value of the median-of-nine sample.
            RanIt _Median = internal::med9v(_LeftIt, _MidIt, _RightIt);
            typename std::iterator_traits<RanIt>::value_type _Pivot = *_Median;
            // Dutch-national-flag style pass: items < pivot move to the left
            // end, items > pivot to the right end, equal items stay in place.
            for (RanIt _FwdIt = _LeftIt; _FwdIt <= _RightIt; _FwdIt++)
            {
                if (compare(*_FwdIt, _Pivot))
                {
                    is_swapped_left = true;
                    std::iter_swap(_FwdIt, _LeftIt);
                    _LeftIt++;
                }
                if (compare(_Pivot, *_FwdIt))
                {
                    is_swapped_right = true;
                    std::iter_swap(_FwdIt, _RightIt);
                    // Re-examine the element just swapped into *_FwdIt.
                    _RightIt--;
                    _FwdIt--;
                }
            }
            if (_Size >= internal::cutoff_high)
            {
                // Large range: sort the two partitions concurrently.
                #pragma omp task untied mergeable
                if (std::distance(_First, _LeftIt) > 0 && is_swapped_left)
                    internal::_qs3w(_First, _LeftIt, compare);
                #pragma omp task untied mergeable
                if (std::distance(_RightIt, _Last) > 0 && is_swapped_right)
                    internal::_qs3w(_RightIt, _Last, compare);
            }
            else
            {
                // FIX: the original opened a nested "#pragma omp parallel
                // num_threads(12)" region here on every small recursion step,
                // oversubscribing the machine with a hard-coded thread count;
                // sorting the two sub-ranges sequentially is equivalent.
                if (std::distance(_First, _LeftIt) > 0 && is_swapped_left)
                    internal::_qs3w(_First, _LeftIt, compare);
                if (std::distance(_RightIt, _Last) > 0 && is_swapped_right)
                    internal::_qs3w(_RightIt, _Last, compare);
            }
        }
    }

    // Hoare-style partition of the inclusive range [_First, _Last] around a
    // median-of-nine pivot.  Returns the pair of iterators delimiting the two
    // resulting partitions.  Ranges of size 1 or 2 are sorted directly.
    template<class BidirIt, class _Pred >
    inline std::pair<BidirIt, BidirIt> partition(BidirIt _First,
                                                 BidirIt _Last, _Pred compare)
    {
        std::size_t _Size = 0L;
        if ((_Size = std::distance(_First, _Last)) == 1)
        {
            // Two elements: a single compare-and-swap sorts them.
            if (!compare(*_First, *_Last))
                std::iter_swap(_First, _Last);
            return std::make_pair(_First, _Last);
        }
        if (_Size == 2)
        {
            // Three elements: sort them directly.
            internal::sort3v(_First, _First + 1, _Last, compare);
            return std::make_pair(_First, _Last);
        }
        BidirIt _LeftIt = _First, _RightIt = _Last,
                _MidIt = _LeftIt + _Size / 2;
        // Order the ends and the middle, then the middle's neighbourhood,
        // before sampling the pivot.
        internal::sort3v(_LeftIt, _MidIt, _RightIt, compare);
        internal::sort3v(_MidIt - 1, _MidIt, _MidIt + 1, compare);
        BidirIt _Median = internal::med9v(_LeftIt, _MidIt, _RightIt);
        typename std::iterator_traits<BidirIt>::value_type _Pivot = *_Median;
        // Keep pivot-valued sentinels away from the range ends.
        if (*_First == _Pivot)
            std::iter_swap(_First, _MidIt);
        if (*_Last == _Pivot)
            std::iter_swap(_Last, _MidIt);
        // Classic two-scan exchange loop: advance from the left to an element
        // >= pivot, retreat from the right to an element <= pivot, swap, and
        // repeat until the scans cross.
        while (_LeftIt <= _RightIt)
        {
            for (; _LeftIt <= _Last; _LeftIt++)
            {
                if (compare(_Pivot, *_LeftIt) || _Pivot == *_LeftIt)
                {
                    for (; _RightIt >= _LeftIt; _RightIt--)
                    {
                        if (compare(*_RightIt, _Pivot) || _Pivot == *_RightIt)
                        {
                            if (_LeftIt <= _RightIt)
                            {
                                std::iter_swap(_LeftIt, _RightIt);
                                _LeftIt++;
                                _RightIt--;
                            }
                            break;
                        }
                    }
                    break;
                }
            }
        }
        return std::make_pair(_LeftIt, _RightIt);
    }

    // Introspective sorting step for the inclusive range [_First, _Last]:
    // already-sorted ranges return immediately, small ranges go to insertion
    // sort, larger ones are partitioned and handed to the 3-way quicksort
    // (as OpenMP tasks when large enough).
    template<class BidirIt, class _Pred >
    void intro_sort(BidirIt _First, BidirIt _Last, _Pred compare)
    {
        if (_First >= _Last)
            return;
        std::size_t pos = 0L;
        #pragma omp atomic
        g_depth++;
        // Bail out early if the range is already sorted.
        if (misc::sorted(_First, _Last, pos, compare))
            return;
        std::size_t _Size = 0L;
        if ((_Size = std::distance(_First, _Last)) > internal::cutoff_low)
        {
            std::pair<BidirIt, BidirIt> p =
                internal::partition(_First, _Last, compare);
            if (_Size > internal::cutoff_high)
            {
                // Sort the two partitions as concurrent tasks, skipping
                // empty (null) partitions.
                #pragma omp task untied mergeable
                if (std::distance(p.first, _Last) > 0)
                    internal::_qs3w(p.first, _Last, compare);
                #pragma omp task untied mergeable
                if (std::distance(_First, p.second) > 0)
                    internal::_qs3w(_First, p.second, compare);
            }
            else
            {
                // FIX: the original nested a "#pragma omp parallel
                // num_threads(12)" region here; plain sequential recursion
                // is equivalent and avoids oversubscription.
                if (std::distance(p.first, _Last) > 0)
                    internal::_qs3w(p.first, _Last, compare);
                if (std::distance(_First, p.second) > 0)
                    internal::_qs3w(_First, p.second, compare);
            }
        }
        else
        {
            // Small range: plain insertion sort.
            // BUG FIX: the original called insertion_sort(_First, _Last + 1),
            // which made do_insertion touch one element past the end of the
            // inclusive range; _Last already denotes the last element.
            internal::insertion_sort(_First, _Last, compare);
        }
    }

    // Chunked introspective sort of [_First, _Last): when the array looks
    // like an interleaved sequence (its length has more decimal digits than
    // its maximum value), pre-sort per-thread chunks first, then run a final
    // full-range introspective sort inside a parallel region so the quicksort
    // tasks have a team to run on.
    template<class BidirIt, class _Pred >
    void parallel_sort1(BidirIt _First, BidirIt _Last, _Pred compare)
    {
        std::size_t _Size = std::distance(_First, _Last);
        // FIX: guard the empty range -- the original dereferenced
        // std::max_element's result unconditionally (UB on an empty range).
        if (_Size == 0)
            return;
        typename std::iterator_traits<BidirIt>::value_type
            _MaxValue = *std::max_element(_First, _Last);
        // Decimal magnitude (MSD position) of the size and of the maximum.
        std::size_t _MaxSizeRadix = std::log10((double)_Size);
        std::size_t _MaxValueRadix = std::log10((double)_MaxValue);
        if (_MaxSizeRadix > _MaxValueRadix)
        {
            // Split [0, _Size) into one sub-interval per thread.
            misc::partitioner p(std::make_pair(0, _Size),
                                omp_get_max_threads());
            // BUG FIX: the chunk table above is built for
            // omp_get_max_threads() threads, but the original opened this
            // region with a hard-coded num_threads(12) -- with any other
            // thread count some chunks were never sorted (or p[tid] was
            // indexed out of range).  The thread count now matches.
            #pragma omp parallel num_threads(omp_get_max_threads())
            {
                volatile int tid = omp_get_thread_num();
                internal::intro_sort(_First + p[tid].first(),
                                     _First + p[tid].second(), compare);
            }
        }
        // Final pass over the whole (inclusive) range.  The parallel/master
        // pair gives the task-spawning quicksort a thread team.  (In the
        // original, both branches ended with this identical call.)
        #pragma omp parallel
        #pragma omp master
        internal::intro_sort(_First, _Last - 1, compare);
    }

    // Entry point: sort [_First, _Last) with `compare`.  Runs a cheap shaker
    // pre-pass, detects interleaved sequences and routes them straight to the
    // 3-way quicksort, and otherwise repeats adjacent-sort + introspective
    // sort passes until the range is fully sorted.
    template<class BidirIt, class _Pred >
    void parallel_sort(BidirIt _First, BidirIt _Last, _Pred compare)
    {
        std::size_t pos = 0L;
        g_depth = 0L;
        std::size_t _Size = std::distance(_First, _Last);
        // FIX: guard trivially-sorted ranges -- the original dereferenced the
        // range ends unconditionally (UB when the range is empty).
        if (_Size < 2)
            return;
        // Pre-pass.  (The original wrapped this call in "#pragma omp task"
        // outside any parallel region, where a task is executed immediately
        // by the encountering thread, so a direct call is equivalent.)
        internal::shaker_sort(_First, _Last - 1, compare);
        // First-chance check: pos receives the first unsorted position.
        if (misc::sorted(_First, _Last, pos, compare))
            return;
        BidirIt _LeftIt = _First, _RightIt = _First + pos;
        std::size_t _MaxSizeRadix = std::log10((double)_Size);
        std::size_t _MaxValueRadix = std::log10((double)*_RightIt);
        if (_MaxSizeRadix > _MaxValueRadix)
        {
            // Interleaved-sequence heuristic: the first element repeats right
            // after the first unsorted position.
            // NOTE(review): *(_RightIt + 1) is read without checking that
            // pos + 1 < _Size, exactly as in the original -- TODO confirm
            // misc::sorted() can never report the last position here.
            if (*_LeftIt == *(_RightIt + 1))
            {
                #pragma omp parallel
                #pragma omp master
                internal::_qs3w(_First, _Last - 1, compare);
                return;
            }
        }
        // General case: pre-sort with adjacent swaps, then run the chunked
        // introspective sort; repeat until the whole range is sorted.
        do
        {
            internal::adjacent_sort(_First, _Last, compare);
            internal::parallel_sort1(_First, _Last, compare);
        } while (!misc::sorted(_First, _Last, pos, compare));
    }
}
#endif // PARALLEL_SORT_STL_H
cp-tree.h
/* Definitions for C++ parsing and type checking. Copyright (C) 1987-2018 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_CP_TREE_H #define GCC_CP_TREE_H #include "tm.h" #include "hard-reg-set.h" #include "function.h" /* In order for the format checking to accept the C++ front end diagnostic framework extensions, you must include this file before diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE in c-common.h. */ #undef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cxxdiag__ #if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H) #error \ In order for the format checking to accept the C++ front end diagnostic \ framework extensions, you must include this file before diagnostic-core.h and \ c-common.h, not after. #endif #include "c-family/c-common.h" #include "diagnostic.h" /* A tree node, together with a location, so that we can track locations (and ranges) during parsing. The location is redundant for node kinds that have locations, but not all node kinds do (e.g. constants, and references to params, locals, etc), so we stash a copy here. 
*/

/* A tree node paired with the source location it was parsed at, so the
   parser can track locations (and ranges) even for node kinds that do not
   carry their own location.  */

class cp_expr
{
public:
  cp_expr () : m_value (NULL), m_loc (UNKNOWN_LOCATION) {}

  cp_expr (tree value) : m_value (value), m_loc (EXPR_LOCATION (m_value)) {}

  cp_expr (tree value, location_t loc): m_value (value), m_loc (loc) {}

  cp_expr (const cp_expr &other) : m_value (other.m_value),
				   m_loc (other.m_loc) {}

  /* Implicit conversions to tree.  */
  operator tree () const { return m_value; }
  tree & operator* () { return m_value; }
  tree operator* () const { return m_value; }
  tree & operator-> () { return m_value; }
  tree operator-> () const { return m_value; }

  tree get_value () const { return m_value; }
  location_t get_location () const { return m_loc; }
  location_t get_start () const
  {
    source_range src_range = get_range_from_loc (line_table, m_loc);
    return src_range.m_start;
  }
  location_t get_finish () const
  {
    source_range src_range = get_range_from_loc (line_table, m_loc);
    return src_range.m_finish;
  }

  void set_location (location_t loc)
  {
    protected_set_expr_location (m_value, loc);
    m_loc = loc;
  }

  void set_range (location_t start, location_t finish)
  {
    set_location (make_location (m_loc, start, finish));
  }

  cp_expr& maybe_add_location_wrapper ()
  {
    m_value = maybe_wrap_with_location (m_value, m_loc);
    return *this;
  }

 private:
  /* The wrapped tree node.  */
  tree m_value;
  /* The stashed location; redundant for node kinds that carry their own
     location, but not all node kinds do.  */
  location_t m_loc;
};

inline bool
operator == (const cp_expr &lhs, tree rhs)
{
  return lhs.get_value () == rhs;
}

/* Index into the cp_global_trees cache below; one slot per cached tree.
   The #define accessors that follow name the individual slots.  */

enum cp_tree_index
{
    CPTI_WCHAR_DECL,
    CPTI_VTABLE_ENTRY_TYPE,
    CPTI_DELTA_TYPE,
    CPTI_VTABLE_INDEX_TYPE,
    CPTI_CLEANUP_TYPE,
    CPTI_VTT_PARM_TYPE,

    CPTI_CLASS_TYPE,
    CPTI_UNKNOWN_TYPE,
    CPTI_INIT_LIST_TYPE,
    CPTI_VTBL_TYPE,
    CPTI_VTBL_PTR_TYPE,
    CPTI_STD,
    CPTI_ABI,
    CPTI_GLOBAL,
    CPTI_GLOBAL_TYPE,
    CPTI_CONST_TYPE_INFO_TYPE,
    CPTI_TYPE_INFO_PTR_TYPE,
    CPTI_ABORT_FNDECL,
    CPTI_AGGR_TAG,
    CPTI_CONV_OP_MARKER,

    CPTI_CTOR_IDENTIFIER,
    CPTI_COMPLETE_CTOR_IDENTIFIER,
    CPTI_BASE_CTOR_IDENTIFIER,
    CPTI_DTOR_IDENTIFIER,
    CPTI_COMPLETE_DTOR_IDENTIFIER,
    CPTI_BASE_DTOR_IDENTIFIER,
    CPTI_DELETING_DTOR_IDENTIFIER,
    CPTI_CONV_OP_IDENTIFIER,
    CPTI_DELTA_IDENTIFIER,
    CPTI_IN_CHARGE_IDENTIFIER,
    CPTI_VTT_PARM_IDENTIFIER,
    CPTI_THIS_IDENTIFIER,
    CPTI_PFN_IDENTIFIER,
    CPTI_VPTR_IDENTIFIER,
    CPTI_GLOBAL_IDENTIFIER,
    CPTI_STD_IDENTIFIER,
    CPTI_ANON_IDENTIFIER,
    CPTI_AUTO_IDENTIFIER,
    CPTI_DECLTYPE_AUTO_IDENTIFIER,
    CPTI_INIT_LIST_IDENTIFIER,

    CPTI_LANG_NAME_C,
    CPTI_LANG_NAME_CPLUSPLUS,

    CPTI_EMPTY_EXCEPT_SPEC,
    CPTI_NOEXCEPT_TRUE_SPEC,
    CPTI_NOEXCEPT_FALSE_SPEC,
    CPTI_NOEXCEPT_DEFERRED_SPEC,

    CPTI_TERMINATE_FN,
    CPTI_CALL_UNEXPECTED_FN,
    CPTI_GET_EXCEPTION_PTR_FN,
    CPTI_BEGIN_CATCH_FN,
    CPTI_END_CATCH_FN,
    CPTI_ALLOCATE_EXCEPTION_FN,
    CPTI_FREE_EXCEPTION_FN,
    CPTI_THROW_FN,
    CPTI_RETHROW_FN,
    CPTI_ATEXIT_FN_PTR_TYPE,
    CPTI_ATEXIT,
    CPTI_DSO_HANDLE,
    CPTI_DCAST,

    CPTI_NULLPTR,
    CPTI_NULLPTR_TYPE,

    CPTI_ALIGN_TYPE,

    CPTI_ANY_TARG,

    CPTI_MAX
};

/* The cache itself; see the enum above for the meaning of each slot.  */
extern GTY(()) tree cp_global_trees[CPTI_MAX];

#define wchar_decl_node			cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type		cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
   pointer in pointer-to-member types.  */
#define delta_type_node			cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable.  */
#define vtable_index_type		cp_global_trees[CPTI_VTABLE_INDEX_TYPE]

#define class_type_node			cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node		cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node		cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node			cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node		cp_global_trees[CPTI_VTBL_PTR_TYPE]
#define std_node			cp_global_trees[CPTI_STD]
#define abi_node			cp_global_trees[CPTI_ABI]
#define global_namespace		cp_global_trees[CPTI_GLOBAL]
#define global_type_node		cp_global_trees[CPTI_GLOBAL_TYPE]
#define const_type_info_type_node	cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type		cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define conv_op_marker			cp_global_trees[CPTI_CONV_OP_MARKER]
#define abort_fndecl			cp_global_trees[CPTI_ABORT_FNDECL]
#define current_aggr			cp_global_trees[CPTI_AGGR_TAG]
#define nullptr_node			cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node		cp_global_trees[CPTI_NULLPTR_TYPE]
/* std::align_val_t */
#define align_type_node			cp_global_trees[CPTI_ALIGN_TYPE]

/* We cache these tree nodes so as to call get_identifier less frequently.
   For identifiers for functions, including special member functions such
   as ctors and assignment operators, the nodes can be used (among other
   things) to iterate over their overloads defined by/for a type.  For
   example:

     tree ovlid = assign_op_identifier;
     tree overloads = get_class_binding (type, ovlid);
     for (ovl_iterator it (overloads); it; ++it) { ... }

   iterates over the set of implicitly and explicitly defined overloads
   of the assignment operator for type (including the copy and move
   assignment operators, whether deleted or not).  */

/* The name of a constructor that takes an in-charge parameter to
   decide whether or not to construct virtual base classes.  */
#define ctor_identifier			cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes.  */
#define complete_ctor_identifier	cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes.  */
#define base_ctor_identifier		cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
   decide whether or not to destroy virtual base classes and whether
   or not to delete the object.  */
#define dtor_identifier			cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes.  */
#define complete_dtor_identifier	cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
   classes.  */
#define base_dtor_identifier		cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
   then deletes the entire object.  */
#define deleting_dtor_identifier	cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]

#define ovl_op_identifier(ISASS, CODE)  (OVL_OP_INFO(ISASS, CODE)->identifier)
#define assign_op_identifier (ovl_op_info[true][OVL_OP_NOP_EXPR].identifier)
#define call_op_identifier (ovl_op_info[false][OVL_OP_CALL_EXPR].identifier)
/* The name used for conversion operators -- but note that actual
   conversion functions use special identifiers outside the identifier
   table.  */
#define conv_op_identifier		cp_global_trees[CPTI_CONV_OP_IDENTIFIER]

#define delta_identifier		cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier		cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
   for this subobject constructor or destructor.  */
#define vtt_parm_identifier		cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define this_identifier			cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier			cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier			cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the ::, std & anon namespaces.  */
#define global_identifier		cp_global_trees[CPTI_GLOBAL_IDENTIFIER]
#define std_identifier			cp_global_trees[CPTI_STD_IDENTIFIER]
#define anon_identifier			cp_global_trees[CPTI_ANON_IDENTIFIER]
/* auto and decltype(auto) identifiers.  */
#define auto_identifier			cp_global_trees[CPTI_AUTO_IDENTIFIER]
#define decltype_auto_identifier	cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER]
#define init_list_identifier		cp_global_trees[CPTI_INIT_LIST_IDENTIFIER]
#define lang_name_c			cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus		cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]

/* Exception specifiers used for throw(), noexcept(true),
   noexcept(false) and deferred noexcept.  We rely on these being
   uncloned.  */
#define empty_except_spec		cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
#define noexcept_true_spec		cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec		cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
#define noexcept_deferred_spec		cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC]

/* Exception handling function declarations.  */
#define terminate_fn			cp_global_trees[CPTI_TERMINATE_FN]
#define call_unexpected_fn		cp_global_trees[CPTI_CALL_UNEXPECTED_FN]
#define get_exception_ptr_fn		cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN]
#define begin_catch_fn			cp_global_trees[CPTI_BEGIN_CATCH_FN]
#define end_catch_fn			cp_global_trees[CPTI_END_CATCH_FN]
#define allocate_exception_fn		cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN]
#define free_exception_fn		cp_global_trees[CPTI_FREE_EXCEPTION_FN]
#define throw_fn			cp_global_trees[CPTI_THROW_FN]
#define rethrow_fn			cp_global_trees[CPTI_RETHROW_FN]

/* The type of the function-pointer argument to "__cxa_atexit" (or
   "std::atexit", if "__cxa_atexit" is not being used).  */
#define atexit_fn_ptr_type_node		cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]

/* A pointer to `std::atexit'.  */
#define atexit_node			cp_global_trees[CPTI_ATEXIT]

/* A pointer to `__dso_handle'.  */
#define dso_handle_node			cp_global_trees[CPTI_DSO_HANDLE]

/* The declaration of the dynamic_cast runtime.  */
#define dynamic_cast_node		cp_global_trees[CPTI_DCAST]

/* The type of a destructor.  */
#define cleanup_type			cp_global_trees[CPTI_CLEANUP_TYPE]

/* The type of the vtt parameter passed to subobject constructors and
   destructors.  */
#define vtt_parm_type			cp_global_trees[CPTI_VTT_PARM_TYPE]

/* A node which matches any template argument.  */
#define any_targ_node			cp_global_trees[CPTI_ANY_TARG]

/* Node to indicate default access. This must be distinct from the
   access nodes in tree.h.  */

#define access_default_node		null_node

#include "name-lookup.h"

/* Usage of TREE_LANG_FLAG_?:
   0: IDENTIFIER_KIND_BIT_0 (in IDENTIFIER_NODE)
      NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
      COND_EXPR_IS_VEC_DELETE (in COND_EXPR).
      DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
      COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
      CLEANUP_P (in TRY_BLOCK)
      AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
      PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
      PAREN_STRING_LITERAL (in STRING_CST)
      CP_DECL_THREAD_LOCAL_P (in VAR_DECL)
      KOENIG_LOOKUP_P (in CALL_EXPR)
      STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT) STMT_EXPR_NO_SCOPE (in STMT_EXPR) BIND_EXPR_TRY_BLOCK (in BIND_EXPR) TYPENAME_IS_ENUM_P (in TYPENAME_TYPE) OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE, and OMP_TASKLOOP) BASELINK_QUALIFIED_P (in BASELINK) TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR) TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX) ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute) ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag) LAMBDA_CAPTURE_EXPLICIT_P (in a TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST) CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR) LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR) DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE) VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR) DECL_OVERRIDE_P (in FUNCTION_DECL) IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR) TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR) CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR) PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION) TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO) SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR) COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ) WILDCARD_PACK_P (in WILDCARD_DECL) BLOCK_OUTER_CURLY_BRACE_P (in BLOCK) FOLD_EXPR_MODOP_P (*_FOLD_EXPR) IF_STMT_CONSTEXPR_P (IF_STMT) TEMPLATE_TYPE_PARM_FOR_CLASS (TEMPLATE_TYPE_PARM) DECL_NAMESPACE_INLINE_P (in NAMESPACE_DECL) SWITCH_STMT_ALL_CASES_P (in SWITCH_STMT) REINTERPRET_CAST_P (in NOP_EXPR) ALIGNOF_EXPR_STD_P (in ALIGNOF_EXPR) 1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE) TI_PENDING_TEMPLATE_FLAG. TEMPLATE_PARMS_FOR_INLINE. DELETE_EXPR_USE_VEC (in DELETE_EXPR). (TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out). 
ICS_ELLIPSIS_FLAG (in _CONV) DECL_INITIALIZED_P (in VAR_DECL) TYPENAME_IS_CLASS_P (in TYPENAME_TYPE) STMT_IS_FULL_EXPR_P (in _STMT) TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR) LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR) DECL_FINAL_P (in FUNCTION_DECL) QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF) DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE) CONSTRUCTOR_NO_IMPLICIT_ZERO (in CONSTRUCTOR) TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO) PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION) OVL_USING_P (in OVERLOAD) IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR) 2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE) ICS_THIS_FLAG (in _CONV) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL) STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST) TYPENAME_IS_RESOLVING_P (in TYPE_NAME_TYPE) TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR) FNDECL_USED_AUTO (in FUNCTION_DECL) DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE) REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF) AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR) CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR) OVL_HIDDEN_P (in OVERLOAD) SWITCH_STMT_NO_BREAK_P (in SWITCH_STMT) LAMBDA_EXPR_CAPTURE_OPTIMIZED (in LAMBDA_EXPR) 3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out). ICS_BAD_FLAG (in _CONV) FN_TRY_BLOCK_P (in TRY_BLOCK) BIND_EXPR_BODY_BLOCK (in BIND_EXPR) DECL_NONTRIVIALLY_INITIALIZED_P (in VAR_DECL) CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR) DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE) CONSTRUCTOR_C99_COMPOUND_LITERAL (in CONSTRUCTOR) OVL_NESTED_P (in OVERLOAD) 4: IDENTIFIER_MARKED (IDENTIFIER_NODEs) TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR, CALL_EXPR, or FIELD_DECL). 
DECL_TINFO_P (in VAR_DECL) FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) OVL_LOOKUP_P (in OVERLOAD) LOOKUP_FOUND_P (in RECORD_TYPE, UNION_TYPE, NAMESPACE_DECL) 5: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE) DECL_VTABLE_OR_VTT_P (in VAR_DECL) FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR) CONSTRUCTOR_PLACEHOLDER_BOUNDARY (in CONSTRUCTOR) 6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE) DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL) TYPE_MARKED_P (in _TYPE) RANGE_FOR_IVDEP (in RANGE_FOR_STMT) CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR) Usage of TYPE_LANG_FLAG_?: 0: TYPE_DEPENDENT_P 1: TYPE_HAS_USER_CONSTRUCTOR. 2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE) TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE) 4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR 5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE) ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE) AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM) REFERENCE_VLA_OK (in REFERENCE_TYPE) 6: TYPE_DEPENDENT_P_VALID Usage of DECL_LANG_FLAG_?: 0: DECL_ERROR_REPORTED (in VAR_DECL). DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL) DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL) DECL_MUTABLE_P (in FIELD_DECL) DECL_DEPENDENT_P (in USING_DECL) LABEL_DECL_BREAK (in LABEL_DECL) 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) USING_DECL_TYPENAME_P (in USING_DECL) DECL_VLA_CAPTURE_P (in FIELD_DECL) DECL_ARRAY_PARAMETER_P (in PARM_DECL) LABEL_DECL_CONTINUE (in LABEL_DECL) 2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL). DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL) DECL_CONSTRAINT_VAR_P (in a PARM_DECL) TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL) DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL) LABEL_DECL_CDTOR (in LABEL_DECL) 3: DECL_IN_AGGR_P. 
4: DECL_C_BIT_FIELD (in a FIELD_DECL) DECL_ANON_UNION_VAR_P (in a VAR_DECL) DECL_SELF_REFERENCE_P (in a TYPE_DECL) DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL) 5: DECL_INTERFACE_KNOWN. 6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL). DECL_FIELD_IS_BASE (in FIELD_DECL) TYPE_DECL_ALIAS_P (in TYPE_DECL) 7: DECL_DEAD_FOR_LOCAL (in VAR_DECL). DECL_THUNK_P (in a member FUNCTION_DECL) DECL_NORMAL_CAPTURE_P (in FIELD_DECL) 8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL) Usage of language-independent fields in a language-dependent manner: TYPE_ALIAS_SET This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so forth as a substitute for the mark bits provided in `lang_type'. At present, only the six low-order bits are used. TYPE_LANG_SLOT_1 For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS. For a POINTER_TYPE (to a METHOD_TYPE), this is TYPE_PTRMEMFUNC_TYPE. For an ENUMERAL_TYPE, BOUND_TEMPLATE_TEMPLATE_PARM_TYPE, RECORD_TYPE or UNION_TYPE this is TYPE_TEMPLATE_INFO, BINFO_VIRTUALS For a binfo, this is a TREE_LIST. There is an entry for each virtual function declared either in BINFO or its direct and indirect primary bases. The BV_DELTA of each node gives the amount by which to adjust the `this' pointer when calling the function. If the method is an overridden version of a base class method, then it is assumed that, prior to adjustment, the this pointer points to an object of the base class. The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable index of the vcall offset for this entry. The BV_FN is the declaration for the virtual function itself. If BV_LOST_PRIMARY is set, it means that this entry is for a lost primary virtual base and can be left null in the vtable. BINFO_VTABLE This is an expression with POINTER_TYPE that gives the value to which the vptr should be initialized. Use get_vtbl_decl_for_binfo to extract the VAR_DECL for the complete vtable. DECL_VINDEX This field is NULL for a non-virtual function. 
For a virtual function, it is eventually set to an INTEGER_CST indicating the index in the vtable at which this function can be found. When a virtual function is declared, but before it is known what function is overridden, this field is the error_mark_node. Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is the virtual function this one overrides, and whose TREE_CHAIN is the old DECL_VINDEX. */ /* Language-specific tree checkers. */ #define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL) #define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL) #define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \ || TREE_CODE (NODE) == FUNCTION_DECL) #define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL) #define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \ TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define THUNK_FUNCTION_CHECK(NODE) __extension__ \ ({ __typeof (NODE) const __t = (NODE); \ if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \ || !__t->decl_common.lang_specific->u.fn.thunk_p) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \ __t; }) #else #define THUNK_FUNCTION_CHECK(NODE) (NODE) #endif /* Language-dependent contents of an identifier. */ struct GTY(()) lang_identifier { struct c_common_identifier c_common; cxx_binding *bindings; }; /* Return a typed pointer version of T if it designates a C++ front-end identifier. 
*/
inline lang_identifier*
identifier_p (tree t)
{
  if (TREE_CODE (t) == IDENTIFIER_NODE)
    return (lang_identifier*) t;
  return NULL;
}

/* Unchecked-checked cast of NODE to the C++ front end's extended
   identifier representation.  */
#define LANG_IDENTIFIER_CAST(NODE) \
  ((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))

/* A TEMPLATE_PARM_INDEX: identifies a template parameter by position
   and template-nesting level.  */
struct GTY(()) template_parm_index {
  struct tree_common common;
  int index;		/* Position within its parameter list.  */
  int level;		/* Nesting level of that parameter list.  */
  int orig_level;	/* Level as originally declared (presumably before
			   any level reduction) -- TODO(review): confirm.  */
  tree decl;		/* The parameter's declaration.  */
};

/* A PTRMEM_CST: a pointer-to-member constant.  */
struct GTY(()) ptrmem_cst {
  struct tree_common common;
  tree member;		/* The member the constant designates.  */
};
typedef struct ptrmem_cst * ptrmem_cst_t;

/* Flag 0 of a TRY_BLOCK (see the TREE_LANG_FLAG_0 usage table above).  */
#define CLEANUP_P(NODE)		TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))

#define BIND_EXPR_TRY_BLOCK(NODE) \
  TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))

/* Used to mark the block around the member initializers and cleanups.  */
#define BIND_EXPR_BODY_BLOCK(NODE) \
  TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))

/* True for functions that get the extra body block: constructors,
   destructors, and lambda call operators.  */
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
   || LAMBDA_FUNCTION_P (NODE))

#define STATEMENT_LIST_NO_SCOPE(NODE) \
  TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
  TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))

/* Mark the outer curly brace BLOCK.  */
#define BLOCK_OUTER_CURLY_BRACE_P(NODE)	TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE))

/* Nonzero if this statement should be considered a full-expression,
   i.e., if temporaries created during this statement should have
   their destructors run at the end of this statement.  */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))

/* Marks the result of a statement expression.  */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
  TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))

/* Nonzero if this statement-expression does not have an associated
   scope.  */
#define STMT_EXPR_NO_SCOPE(NODE) \
  TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))

#define COND_EXPR_IS_VEC_DELETE(NODE) \
  TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE))

/* Nonzero if this NOP_EXPR is a reinterpret_cast.  Such conversions
   are not constexprs.  Other NOP_EXPRs are.
*/
#define REINTERPRET_CAST_P(NODE)		\
  TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE))

/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
   sense of `same'.  */
#define same_type_p(TYPE1, TYPE2) \
  comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)

/* Returns nonzero iff NODE is a declaration for the global function
   `main'.  */
#define DECL_MAIN_P(NODE)				\
  (DECL_EXTERN_C_FUNCTION_P (NODE)			\
   && DECL_NAME (NODE) != NULL_TREE			\
   && MAIN_NAME_P (DECL_NAME (NODE))			\
   && flag_hosted)

/* Lookup walker marking.  */
#define LOOKUP_SEEN_P(NODE) TREE_VISITED(NODE)
#define LOOKUP_FOUND_P(NODE) \
  TREE_LANG_FLAG_4 (TREE_CHECK3(NODE,RECORD_TYPE,UNION_TYPE,NAMESPACE_DECL))

/* These two accessors should only be used by OVL manipulators.
   Other users should use iterators and convenience functions.  */
#define OVL_FUNCTION(NODE) \
  (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) \
  (((struct tree_overload*)OVERLOAD_CHECK (NODE))->common.chain)

/* If set, this was imported in a using declaration.  */
#define OVL_USING_P(NODE)	TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE))
/* If set, this overload is a hidden decl.  */
#define OVL_HIDDEN_P(NODE)	TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE))
/* If set, this overload contains a nested overload.  */
#define OVL_NESTED_P(NODE)	TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE))
/* If set, this overload was constructed during lookup.  */
#define OVL_LOOKUP_P(NODE)	TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE))
/* If set, this is a persistent lookup.  */
#define OVL_USED_P(NODE)	TREE_USED (OVERLOAD_CHECK (NODE))

/* The first decl of an overload.  */
#define OVL_FIRST(NODE)	ovl_first (NODE)
/* The name of the overload set.  */
#define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE))

/* Whether this is a set of overloaded functions.  TEMPLATE_DECLS are
   always wrapped in an OVERLOAD, so we don't need to check them
   here.  */
#define OVL_P(NODE) \
  (TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD)
/* Whether this is a single member overload.
*/
#define OVL_SINGLE_P(NODE) \
  (TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE))

/* OVL_HIDDEN_P nodes come first, then OVL_USING_P nodes, then regular
   fns.  */
struct GTY(()) tree_overload {
  struct tree_common common;
  tree function;
};

/* Iterator for a 1 dimensional overload.  Permits iterating over the
   outer level of a 2-d overload when explicitly enabled.  */
class ovl_iterator {
  tree ovl;
  const bool allow_inner; /* Only used when checking.  */

 public:
  explicit ovl_iterator (tree o, bool allow = false)
    : ovl (o), allow_inner (allow)
  {
  }

 private:
  /* Do not duplicate.  */
  ovl_iterator &operator= (const ovl_iterator &);
  ovl_iterator (const ovl_iterator &);

 public:
  operator bool () const
  {
    return ovl;
  }
  ovl_iterator &operator++ ()
  {
    /* A non-OVERLOAD node is a singleton; stepping past it ends the
       iteration.  */
    ovl = TREE_CODE (ovl) != OVERLOAD ? NULL_TREE : OVL_CHAIN (ovl);
    return *this;
  }
  tree operator* () const
  {
    tree fn = TREE_CODE (ovl) != OVERLOAD ? ovl : OVL_FUNCTION (ovl);

    /* Check this is not an unexpected 2-dimensional overload.  */
    gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD);

    return fn;
  }

 public:
  /* Whether this overload was introduced by a using decl.  */
  bool using_p () const
  {
    return TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl);
  }
  /* Whether this overload is a hidden decl (OVL_HIDDEN_P).  */
  bool hidden_p () const
  {
    return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl);
  }

 public:
  /* Remove (or reveal) the node this iterator is at, given HEAD of the
     overload list; returns the (possibly new) head.  */
  tree remove_node (tree head)
  {
    return remove_node (head, ovl);
  }
  tree reveal_node (tree head)
  {
    return reveal_node (head, ovl);
  }

 protected:
  /* If we have a nested overload, point at the inner overload and
     return the next link on the outer one.  */
  tree maybe_push ()
  {
    tree r = NULL_TREE;

    if (ovl && TREE_CODE (ovl) == OVERLOAD && OVL_NESTED_P (ovl))
      {
	r = OVL_CHAIN (ovl);
	ovl = OVL_FUNCTION (ovl);
      }
    return r;
  }
  /* Restore an outer nested overload.  */
  void pop (tree outer)
  {
    gcc_checking_assert (!ovl);
    ovl = outer;
  }

 private:
  /* We make these static functions to avoid the address of the
     iterator escaping the local context.  */
  static tree remove_node (tree head, tree node);
  static tree reveal_node (tree ovl, tree node);
};

/* Iterator over a (potentially) 2 dimensional overload, which is
   produced by name lookup.  */
class lkp_iterator : public ovl_iterator {
  typedef ovl_iterator parent;

  tree outer; /* Saved outer overload while descended into a nested one
		 (see maybe_push); NULL_TREE otherwise.  */

 public:
  explicit lkp_iterator (tree o)
    : parent (o, true), outer (maybe_push ())
  {
  }

 public:
  lkp_iterator &operator++ ()
  {
    /* Advance the inner level; when it is exhausted and we have an
       outer level saved, pop back to it and descend into its next
       nested overload, if any.  */
    bool repush = !outer;

    if (!parent::operator++ () && !repush)
      {
	pop (outer);
	repush = true;
      }

    if (repush)
      outer = maybe_push ();
    return *this;
  }
};

/* hash traits for declarations.  Hashes potential overload sets via
   DECL_NAME.  */
struct named_decl_hash : ggc_remove <tree> {
  typedef tree value_type; /* A DECL or OVERLOAD  */
  typedef tree compare_type; /* An identifier.  */

  inline static hashval_t hash (const value_type decl);
  inline static bool equal (const value_type existing,
			    compare_type candidate);

  static inline void mark_empty (value_type &p) {p = NULL_TREE;}
  static inline bool is_empty (value_type p) {return !p;}

  /* Nothing is deletable.  Everything is insertable.  */
  static bool is_deleted (value_type) { return false; }
  static void mark_deleted (value_type) { gcc_unreachable (); }
};

struct GTY(()) tree_template_decl {
  struct tree_decl_common common;
  tree arguments;
  tree result;
};

/* Returns true iff NODE is a BASELINK.  */
#define BASELINK_P(NODE) \
  (TREE_CODE (NODE) == BASELINK)

/* The BINFO indicating the base in which lookup found the
   BASELINK_FUNCTIONS.  */
#define BASELINK_BINFO(NODE) \
  (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)

/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
   a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR.  */
#define BASELINK_FUNCTIONS(NODE) \
  (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)

/* If T is a BASELINK, grab the functions, otherwise just T, which is
   expected to already be a (list of) functions.  */
#define MAYBE_BASELINK_FUNCTIONS(T) \
  (BASELINK_P (T) ?
BASELINK_FUNCTIONS (T) : T) /* The BINFO in which the search for the functions indicated by this baselink began. This base is used to determine the accessibility of functions selected by overload resolution. */ #define BASELINK_ACCESS_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo) /* For a type-conversion operator, the BASELINK_OPTYPE indicates the type to which the conversion should occur. This value is important if the BASELINK_FUNCTIONS include a template conversion operator -- the BASELINK_OPTYPE can be used to determine what type the user requested. */ #define BASELINK_OPTYPE(NODE) \ (TREE_CHAIN (BASELINK_CHECK (NODE))) /* Nonzero if this baselink was from a qualified lookup. */ #define BASELINK_QUALIFIED_P(NODE) \ TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE)) struct GTY(()) tree_baselink { struct tree_common common; tree binfo; tree functions; tree access_binfo; }; /* The different kinds of ids that we encounter. */ enum cp_id_kind { /* Not an id at all. */ CP_ID_KIND_NONE, /* An unqualified-id that is not a template-id. */ CP_ID_KIND_UNQUALIFIED, /* An unqualified-id that is a dependent name. */ CP_ID_KIND_UNQUALIFIED_DEPENDENT, /* An unqualified template-id. */ CP_ID_KIND_TEMPLATE_ID, /* A qualified-id. */ CP_ID_KIND_QUALIFIED }; /* The various kinds of C++0x warnings we encounter. 
*/ enum cpp0x_warn_str { /* extended initializer lists */ CPP0X_INITIALIZER_LISTS, /* explicit conversion operators */ CPP0X_EXPLICIT_CONVERSION, /* variadic templates */ CPP0X_VARIADIC_TEMPLATES, /* lambda expressions */ CPP0X_LAMBDA_EXPR, /* C++0x auto */ CPP0X_AUTO, /* scoped enums */ CPP0X_SCOPED_ENUMS, /* defaulted and deleted functions */ CPP0X_DEFAULTED_DELETED, /* inline namespaces */ CPP0X_INLINE_NAMESPACES, /* override controls, override/final */ CPP0X_OVERRIDE_CONTROLS, /* non-static data member initializers */ CPP0X_NSDMI, /* user defined literals */ CPP0X_USER_DEFINED_LITERALS, /* delegating constructors */ CPP0X_DELEGATING_CTORS, /* inheriting constructors */ CPP0X_INHERITING_CTORS, /* C++11 attributes */ CPP0X_ATTRIBUTES, /* ref-qualified member functions */ CPP0X_REF_QUALIFIER }; /* The various kinds of operation used by composite_pointer_type. */ enum composite_pointer_operation { /* comparison */ CPO_COMPARISON, /* conversion */ CPO_CONVERSION, /* conditional expression */ CPO_CONDITIONAL_EXPR }; /* Possible cases of expression list used by build_x_compound_expr_from_list. */ enum expr_list_kind { ELK_INIT, /* initializer */ ELK_MEM_INIT, /* member initializer */ ELK_FUNC_CAST /* functional cast */ }; /* Possible cases of implicit bad rhs conversions. */ enum impl_conv_rhs { ICR_DEFAULT_ARGUMENT, /* default argument */ ICR_CONVERTING, /* converting */ ICR_INIT, /* initialization */ ICR_ARGPASS, /* argument passing */ ICR_RETURN, /* return */ ICR_ASSIGN /* assignment */ }; /* Possible cases of implicit or explicit bad conversions to void. 
*/ enum impl_conv_void { ICV_CAST, /* (explicit) conversion to void */ ICV_SECOND_OF_COND, /* second operand of conditional expression */ ICV_THIRD_OF_COND, /* third operand of conditional expression */ ICV_RIGHT_OF_COMMA, /* right operand of comma operator */ ICV_LEFT_OF_COMMA, /* left operand of comma operator */ ICV_STATEMENT, /* statement */ ICV_THIRD_IN_FOR /* for increment expression */ }; /* Possible invalid uses of an abstract class that might not have a specific associated declaration. */ enum GTY(()) abstract_class_use { ACU_UNKNOWN, /* unknown or decl provided */ ACU_CAST, /* cast to abstract class */ ACU_NEW, /* new-expression of abstract class */ ACU_THROW, /* throw-expression of abstract class */ ACU_CATCH, /* catch-parameter of abstract class */ ACU_ARRAY, /* array of abstract class */ ACU_RETURN, /* return type of abstract class */ ACU_PARM /* parameter type of abstract class */ }; /* Macros for access to language-specific slots in an identifier. */ /* The IDENTIFIER_BINDING is the innermost cxx_binding for the identifier. Its PREVIOUS is the next outermost binding. Each VALUE field is a DECL for the associated declaration. Thus, name lookup consists simply of pulling off the node at the front of the list (modulo oddities for looking up the names of types, and such.) You can use SCOPE field to determine the scope that bound the name. */ #define IDENTIFIER_BINDING(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->bindings) /* TREE_TYPE only indicates on local and class scope the current type. For namespace scope, the presence of a type in any namespace is indicated with global_type_node, and the real type behind must be found through lookup. */ #define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE) #define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE) #define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE)) #define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0) /* Kinds of identifiers. 
Values are carefully chosen. */ enum cp_identifier_kind { cik_normal = 0, /* Not a special identifier. */ cik_keyword = 1, /* A keyword. */ cik_ctor = 2, /* Constructor (in-chg, complete or base). */ cik_dtor = 3, /* Destructor (in-chg, deleting, complete or base). */ cik_simple_op = 4, /* Non-assignment operator name. */ cik_assign_op = 5, /* An assignment operator name. */ cik_conv_op = 6, /* Conversion operator name. */ cik_reserved_for_udlit = 7, /* Not yet in use */ cik_max }; /* Kind bits. */ #define IDENTIFIER_KIND_BIT_0(NODE) \ TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE)) #define IDENTIFIER_KIND_BIT_1(NODE) \ TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE)) #define IDENTIFIER_KIND_BIT_2(NODE) \ TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE)) /* Used by various search routines. */ #define IDENTIFIER_MARKED(NODE) \ TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE)) /* Nonzero if this identifier is used as a virtual function name somewhere (optimizes searches). */ #define IDENTIFIER_VIRTUAL_P(NODE) \ TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE)) /* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague linkage which the prelinker has assigned to this translation unit. */ #define IDENTIFIER_REPO_CHOSEN(NAME) \ (TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (NAME))) /* True if this identifier is a reserved word. C_RID_CODE (node) is then the RID_* value of the keyword. Value 1. */ #define IDENTIFIER_KEYWORD_P(NODE) \ ((!IDENTIFIER_KIND_BIT_2 (NODE)) \ & (!IDENTIFIER_KIND_BIT_1 (NODE)) \ & IDENTIFIER_KIND_BIT_0 (NODE)) /* True if this identifier is the name of a constructor or destructor. Value 2 or 3. */ #define IDENTIFIER_CDTOR_P(NODE) \ ((!IDENTIFIER_KIND_BIT_2 (NODE)) \ & IDENTIFIER_KIND_BIT_1 (NODE)) /* True if this identifier is the name of a constructor. Value 2. */ #define IDENTIFIER_CTOR_P(NODE) \ (IDENTIFIER_CDTOR_P(NODE) \ & (!IDENTIFIER_KIND_BIT_0 (NODE))) /* True if this identifier is the name of a destructor. Value 3. 
*/ #define IDENTIFIER_DTOR_P(NODE) \ (IDENTIFIER_CDTOR_P(NODE) \ & IDENTIFIER_KIND_BIT_0 (NODE)) /* True if this identifier is for any operator name (including conversions). Value 4, 5, 6 or 7. */ #define IDENTIFIER_ANY_OP_P(NODE) \ (IDENTIFIER_KIND_BIT_2 (NODE)) /* True if this identifier is for an overloaded operator. Values 4, 5. */ #define IDENTIFIER_OVL_OP_P(NODE) \ (IDENTIFIER_ANY_OP_P (NODE) \ & (!IDENTIFIER_KIND_BIT_1 (NODE))) /* True if this identifier is for any assignment. Values 5. */ #define IDENTIFIER_ASSIGN_OP_P(NODE) \ (IDENTIFIER_OVL_OP_P (NODE) \ & IDENTIFIER_KIND_BIT_0 (NODE)) /* True if this identifier is the name of a type-conversion operator. Value 7. */ #define IDENTIFIER_CONV_OP_P(NODE) \ (IDENTIFIER_ANY_OP_P (NODE) \ & IDENTIFIER_KIND_BIT_1 (NODE) \ & (!IDENTIFIER_KIND_BIT_0 (NODE))) /* True if this identifier is a new or delete operator. */ #define IDENTIFIER_NEWDEL_OP_P(NODE) \ (IDENTIFIER_OVL_OP_P (NODE) \ && IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC) /* True if this identifier is a new operator. */ #define IDENTIFIER_NEW_OP_P(NODE) \ (IDENTIFIER_OVL_OP_P (NODE) \ && (IDENTIFIER_OVL_OP_FLAGS (NODE) \ & (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) == OVL_OP_FLAG_ALLOC) /* Access a C++-specific index for identifier NODE. Used to optimize operator mappings etc. */ #define IDENTIFIER_CP_INDEX(NODE) \ (IDENTIFIER_NODE_CHECK(NODE)->base.u.bits.address_space) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly) /* The tokens stored in the default argument. 
*/ #define DEFARG_TOKENS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens) #define DEFARG_INSTANTIATIONS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations) struct GTY (()) tree_default_arg { struct tree_common common; struct cp_token_cache *tokens; vec<tree, va_gc> *instantiations; }; #define DEFERRED_NOEXCEPT_PATTERN(NODE) \ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern) #define DEFERRED_NOEXCEPT_ARGS(NODE) \ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args) #define DEFERRED_NOEXCEPT_SPEC_P(NODE) \ ((NODE) && (TREE_PURPOSE (NODE)) \ && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT)) #define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \ (DEFERRED_NOEXCEPT_SPEC_P (NODE) \ && DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE) struct GTY (()) tree_deferred_noexcept { struct tree_base base; tree pattern; tree args; }; /* The condition associated with the static assertion. This must be an integral constant expression. */ #define STATIC_ASSERT_CONDITION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition) /* The message associated with the static assertion. This must be a string constant, which will be emitted as an error message when the static assert condition is false. */ #define STATIC_ASSERT_MESSAGE(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message) /* Source location information for a static assertion. */ #define STATIC_ASSERT_SOURCE_LOCATION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location) struct GTY (()) tree_static_assert { struct tree_common common; tree condition; tree message; location_t location; }; struct GTY (()) tree_argument_pack_select { struct tree_common common; tree argument_pack; int index; }; /* The different kinds of traits that we encounter. 
*/ enum cp_trait_kind { CPTK_BASES, CPTK_DIRECT_BASES, CPTK_HAS_NOTHROW_ASSIGN, CPTK_HAS_NOTHROW_CONSTRUCTOR, CPTK_HAS_NOTHROW_COPY, CPTK_HAS_TRIVIAL_ASSIGN, CPTK_HAS_TRIVIAL_CONSTRUCTOR, CPTK_HAS_TRIVIAL_COPY, CPTK_HAS_TRIVIAL_DESTRUCTOR, CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS, CPTK_HAS_VIRTUAL_DESTRUCTOR, CPTK_IS_ABSTRACT, CPTK_IS_AGGREGATE, CPTK_IS_BASE_OF, CPTK_IS_CLASS, CPTK_IS_EMPTY, CPTK_IS_ENUM, CPTK_IS_FINAL, CPTK_IS_LITERAL_TYPE, CPTK_IS_POD, CPTK_IS_POLYMORPHIC, CPTK_IS_SAME_AS, CPTK_IS_STD_LAYOUT, CPTK_IS_TRIVIAL, CPTK_IS_TRIVIALLY_ASSIGNABLE, CPTK_IS_TRIVIALLY_CONSTRUCTIBLE, CPTK_IS_TRIVIALLY_COPYABLE, CPTK_IS_UNION, CPTK_UNDERLYING_TYPE, CPTK_IS_ASSIGNABLE, CPTK_IS_CONSTRUCTIBLE }; /* The types that we are processing. */ #define TRAIT_EXPR_TYPE1(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1) #define TRAIT_EXPR_TYPE2(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2) /* The specific trait that we are processing. */ #define TRAIT_EXPR_KIND(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind) struct GTY (()) tree_trait_expr { struct tree_common common; tree type1; tree type2; enum cp_trait_kind kind; }; /* Based off of TYPE_UNNAMED_P. */ #define LAMBDA_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE)) /* Test if FUNCTION_DECL is a lambda function. */ #define LAMBDA_FUNCTION_P(FNDECL) \ (DECL_DECLARES_FUNCTION_P (FNDECL) \ && DECL_OVERLOADED_OPERATOR_P (FNDECL) \ && DECL_OVERLOADED_OPERATOR_IS (FNDECL, CALL_EXPR) \ && LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL))) enum cp_lambda_default_capture_mode_type { CPLD_NONE, CPLD_COPY, CPLD_REFERENCE }; /* The method of default capture, if any. */ #define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode) /* The capture-list, including `this'. 
Each capture is stored as a FIELD_DECL * so that the name, type, and field are all together, whether or not it has * been added to the lambda's class type. TREE_LIST: TREE_PURPOSE: The FIELD_DECL for this capture. TREE_VALUE: The initializer. This is part of a GNU extension. */ #define LAMBDA_EXPR_CAPTURE_LIST(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list) /* During parsing of the lambda-introducer, the node in the capture-list that holds the 'this' capture. During parsing of the body, the capture proxy for that node. */ #define LAMBDA_EXPR_THIS_CAPTURE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture) /* Predicate tracking whether `this' is in the effective capture set. */ #define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \ LAMBDA_EXPR_THIS_CAPTURE(NODE) /* Predicate tracking whether the lambda was declared 'mutable'. */ #define LAMBDA_EXPR_MUTABLE_P(NODE) \ TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE)) /* True iff uses of a const variable capture were optimized away. */ #define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \ TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE)) /* True if this TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST is for an explicit capture. */ #define LAMBDA_CAPTURE_EXPLICIT_P(NODE) \ TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) /* The source location of the lambda. */ #define LAMBDA_EXPR_LOCATION(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus) /* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL, FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */ #define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope) /* If EXTRA_SCOPE, this is the number of the lambda within that scope. 
*/ #define LAMBDA_EXPR_DISCRIMINATOR(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator) /* During parsing of the lambda, a vector of capture proxies which need to be pushed once we're done processing a nested lambda. */ #define LAMBDA_EXPR_PENDING_PROXIES(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies) /* The closure type of the lambda, which is also the type of the LAMBDA_EXPR. */ #define LAMBDA_EXPR_CLOSURE(NODE) \ (TREE_TYPE (LAMBDA_EXPR_CHECK (NODE))) struct GTY (()) tree_lambda_expr { struct tree_typed typed; tree capture_list; tree this_capture; tree extra_scope; vec<tree, va_gc> *pending_proxies; location_t locus; enum cp_lambda_default_capture_mode_type default_capture_mode; int discriminator; }; /* A (typedef,context,usage location) triplet. It represents a typedef used through a context at a given source location. e.g. struct foo { typedef int myint; }; struct bar { foo::myint v; // #1<-- this location. }; In bar, the triplet will be (myint, foo, #1). */ struct GTY(()) qualified_typedef_usage_s { tree typedef_decl; tree context; location_t locus; }; typedef struct qualified_typedef_usage_s qualified_typedef_usage_t; /* Non-zero if this template specialization has access violations that should be rechecked when the function is instantiated outside argument deduction. */ #define TINFO_HAS_ACCESS_ERRORS(NODE) \ (TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE))) #define FNDECL_HAS_ACCESS_ERRORS(NODE) \ (TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE))) /* Non-zero if this variable template specialization was specified using a template-id, so it's a partial or full specialization and not a definition of the member template of a particular class specialization. 
*/
#define TINFO_USED_TEMPLATE_ID(NODE) \
  (TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))

struct GTY(()) tree_template_info {
  struct tree_common common;
  vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};

// Constraint information for a C++ declaration. Constraint information is
// comprised of:
//
// - a constraint expression introduced by the template header
// - a constraint expression introduced by a function declarator
// - the associated constraints, which are the conjunction of those,
//   and used for declaration matching
//
// The template and declarator requirements are kept to support pretty
// printing constrained declarations.
struct GTY(()) tree_constraint_info {
  struct tree_base base;
  tree template_reqs;      // requires-clause from the template header
  tree declarator_reqs;    // trailing requires-clause from the declarator
  tree associated_constr;  // conjunction of the two, for decl matching
};

// Require that pointer P is non-null before returning it (asserts
// otherwise).
template<typename T>
inline T*
check_nonnull (T* p)
{
  gcc_assert (p);
  return p;
}

// Returns T cast to tree_constraint_info* if T is non-null and
// represents constraint info, otherwise returns NULL.
inline tree_constraint_info *
check_constraint_info (tree t)
{
  if (t && TREE_CODE (t) == CONSTRAINT_INFO)
    return (tree_constraint_info *)t;
  return NULL;
}

// Access the expression describing the template constraints. This may be
// null if no constraints were introduced in the template parameter list,
// a requirements clause after the template parameter list, or constraints
// through a constrained-type-specifier.
#define CI_TEMPLATE_REQS(NODE) \
  check_constraint_info (check_nonnull(NODE))->template_reqs

// Access the expression describing the trailing constraints. This is non-null
// for any implicit instantiation of a constrained declaration. For a
// templated declaration it is non-null only when a trailing requires-clause
// was specified.
#define CI_DECLARATOR_REQS(NODE) \
  check_constraint_info (check_nonnull(NODE))->declarator_reqs

// The computed associated constraint expression for a declaration.
#define CI_ASSOCIATED_CONSTRAINTS(NODE) \
  check_constraint_info (check_nonnull(NODE))->associated_constr

// Access the logical constraints on the template parameters introduced
// at a given template parameter list level indicated by NODE.
#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \
  TREE_TYPE (TREE_LIST_CHECK (NODE))

// Access the logical constraints on the template parameter declaration
// indicated by NODE.
#define TEMPLATE_PARM_CONSTRAINTS(NODE) \
  TREE_TYPE (TREE_LIST_CHECK (NODE))

/* Non-zero if the noexcept is present in a compound requirement.  */
#define COMPOUND_REQ_NOEXCEPT_P(NODE) \
  TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ))

/* The constraints on an 'auto' placeholder type, used in an argument
   deduction constraint.  */
#define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \
  DECL_SIZE_UNIT (TYPE_NAME (NODE))

/* The expression evaluated by the predicate constraint.  */
#define PRED_CONSTR_EXPR(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, PRED_CONSTR), 0)

/* The concept of a concept check.  */
#define CHECK_CONSTR_CONCEPT(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0)

/* The template arguments of a concept check.  */
#define CHECK_CONSTR_ARGS(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1)

/* The expression validated by the predicate constraint.  */
#define EXPR_CONSTR_EXPR(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, EXPR_CONSTR), 0)

/* The type validated by the predicate constraint.  */
#define TYPE_CONSTR_TYPE(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, TYPE_CONSTR), 0)

/* In an implicit conversion constraint, the source expression.  */
#define ICONV_CONSTR_EXPR(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 0)

/* In an implicit conversion constraint, the target type.  */
#define ICONV_CONSTR_TYPE(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 1)

/* In an argument deduction constraint, the source expression.  */
#define DEDUCT_CONSTR_EXPR(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 0)

/* In an argument deduction constraint, the target type pattern.  */
#define DEDUCT_CONSTR_PATTERN(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 1)

/* In an argument deduction constraint, the list of placeholder nodes.  */
#define DEDUCT_CONSTR_PLACEHOLDER(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 2)

/* The expression of an exception constraint.  */
#define EXCEPT_CONSTR_EXPR(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, EXCEPT_CONSTR), 0)

/* In a parameterized constraint, the local parameters.  */
#define PARM_CONSTR_PARMS(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 0)

/* In a parameterized constraint, the operand.  */
#define PARM_CONSTR_OPERAND(NODE) \
  TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 1)

/* Whether a PARM_DECL represents a local parameter in a
   requires-expression.  */
#define CONSTRAINT_VAR_P(NODE) \
  DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL))

/* The concept constraining this constrained template-parameter.  */
#define CONSTRAINED_PARM_CONCEPT(NODE) \
  DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE))

/* Any extra template arguments specified for a constrained
   template-parameter.  */
#define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \
  DECL_SIZE (TYPE_DECL_CHECK (NODE))

/* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a
   prototype for the constrained parameter in finish_shorthand_constraint,
   attached for convenience.  */
#define CONSTRAINED_PARM_PROTOTYPE(NODE) \
  DECL_INITIAL (TYPE_DECL_CHECK (NODE))

/* Discriminators for the language-specific tree node structures; each
   value selects one member of the lang_tree_node union (the TS_CP_*
   GTY tags below match these).  */
enum cp_tree_node_structure_enum {
  TS_CP_GENERIC,
  TS_CP_IDENTIFIER,
  TS_CP_TPI,
  TS_CP_PTRMEM,
  TS_CP_OVERLOAD,
  TS_CP_BASELINK,
  TS_CP_TEMPLATE_DECL,
  TS_CP_DEFAULT_ARG,
  TS_CP_DEFERRED_NOEXCEPT,
  TS_CP_STATIC_ASSERT,
  TS_CP_ARGUMENT_PACK_SELECT,
  TS_CP_TRAIT_EXPR,
  TS_CP_LAMBDA_EXPR,
  TS_CP_TEMPLATE_INFO,
  TS_CP_CONSTRAINT_INFO,
  TS_CP_USERDEF_LITERAL
};

/* The resulting tree type.
*/
union GTY((desc ("cp_tree_node_structure (&%h)"),
	   chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
  union tree_node GTY ((tag ("TS_CP_GENERIC"),
			desc ("tree_node_structure (&%h)"))) generic;
  struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi;
  struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
  struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
  struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
  struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl;
  struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
  struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
  struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
  struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
    static_assertion;
  struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
    argument_pack_select;
  struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
    trait_expression;
  struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
    lambda_expression;
  struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
    template_info;
  struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO")))
    constraint_info;
  struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
    userdef_literal;
};

/* Global state.  Saved/restored scope context; the macros below expose
   the fields of the innermost entry via scope_chain.  */

struct GTY(()) saved_scope {
  vec<cxx_saved_binding, va_gc> *old_bindings;
  tree old_namespace;		/* See current_namespace.  */
  vec<tree, va_gc> *decl_ns_list;
  tree class_name;		/* See current_class_name.  */
  tree class_type;		/* See current_class_type.  */
  tree access_specifier;	/* See current_access_specifier.  */
  tree function_decl;
  vec<tree, va_gc> *lang_base;
  tree lang_name;
  tree template_parms;		/* See current_template_parms.  */
  cp_binding_level *x_previous_class_level;
  tree x_saved_tree;

  /* Only used for uses of this in trailing return type.  */
  tree x_current_class_ptr;
  tree x_current_class_ref;

  int x_processing_template_decl;
  int x_processing_specialization;
  BOOL_BITFIELD x_processing_explicit_instantiation : 1;
  BOOL_BITFIELD need_pop_function_context : 1;

  /* Nonzero if we are parsing the discarded statement of a constexpr
     if-statement.  */
  BOOL_BITFIELD discarded_stmt : 1;

  int unevaluated_operand;
  int inhibit_evaluation_warnings;
  int noexcept_operand;		/* See cp_noexcept_operand.  */
  /* If non-zero, implicit "omp declare target" attribute is added into the
     attribute lists.  */
  int omp_declare_target_attribute;

  struct stmt_tree_s x_stmt_tree;

  cp_binding_level *class_bindings;
  cp_binding_level *bindings;

  hash_map<tree, tree> *GTY((skip)) x_local_specializations;

  struct saved_scope *prev;	/* Enclosing saved scope (stack link).  */
};

extern GTY(()) struct saved_scope *scope_chain;

/* The current open namespace.  */
#define current_namespace scope_chain->old_namespace

/* The stack for namespaces of current declarations.  */
#define decl_namespace_list scope_chain->decl_ns_list

/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name

/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type

/* When parsing a class definition, the access specifier most recently
   given by the user, or, if no access specifier was given, the
   default value appropriate for the kind of class (i.e., struct,
   class, or union).  */
#define current_access_specifier scope_chain->access_specifier

/* Pointer to the top of the language name stack.  */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name

/* When parsing a template declaration, a TREE_LIST represents the
   active template parameters.  Each node in the list represents one
   level of template parameters.  The innermost level is first in the
   list.  The depth of each level is stored as an INTEGER_CST in the
   TREE_PURPOSE of each node.  The parameters for that level are
   stored in the TREE_VALUE.  */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation

#define in_discarded_stmt scope_chain->discarded_stmt

/* RAII sentinel to handle clearing processing_template_decl and restoring
   it when done.  */

struct processing_template_decl_sentinel
{
  int saved;
  processing_template_decl_sentinel (bool reset = true)
    : saved (processing_template_decl)
  {
    if (reset)
      processing_template_decl = 0;
  }
  ~processing_template_decl_sentinel()
  {
    processing_template_decl = saved;
  }
};

/* RAII sentinel to disable certain warnings during template substitution
   and elsewhere.  */

struct warning_sentinel
{
  int &flag;	/* The warning flag being overridden.  */
  int val;	/* Its saved value, restored on destruction.  */
  warning_sentinel(int& flag, bool suppress=true)
    : flag(flag), val(flag)
  {
    if (suppress)
      flag = 0;
  }
  ~warning_sentinel() { flag = val; }
};

/* RAII sentinel that saves the value of a variable, optionally overrides
   it right away, and restores its value when the sentinel is destructed.  */

template <typename T>
class temp_override
{
  T& overridden_variable;
  T saved_value;
public:
  temp_override(T& var) : overridden_variable (var), saved_value (var) {}
  temp_override(T& var, T overrider)
    : overridden_variable (var), saved_value (var)
  {
    overridden_variable = overrider;
  }
  ~temp_override() { overridden_variable = saved_value; }
};

/* The cached class binding level, from the most recently exited
   class, or NULL if none.  */
#define previous_class_level scope_chain->x_previous_class_level

/* A map from local variable declarations in the body of the template
   presently being instantiated to the corresponding instantiated
   local variables.  */
#define local_specializations scope_chain->x_local_specializations

/* Nonzero if we are parsing the operand of a noexcept operator.
*/
#define cp_noexcept_operand scope_chain->noexcept_operand

/* A list of private types mentioned, for deferred access checking.  */

/* A (decl uid -> tree) mapping entry; see extern_decl_map below.  */
struct GTY((for_user)) cxx_int_tree_map {
  unsigned int uid;
  tree to;
};

struct cxx_int_tree_map_hasher : ggc_ptr_hash<cxx_int_tree_map>
{
  static hashval_t hash (cxx_int_tree_map *);
  static bool equal (cxx_int_tree_map *, cxx_int_tree_map *);
};

struct named_label_entry; /* Defined in decl.c.  */

/* Hash traits for the named-label table (x_named_labels below), keyed
   by label identifier.  */
struct named_label_hash : ggc_remove <named_label_entry *>
{
  typedef named_label_entry *value_type;
  typedef tree compare_type; /* An identifier.  */

  inline static hashval_t hash (value_type);
  inline static bool equal (const value_type, compare_type);

  /* Empty slots are represented by a null pointer.  */
  inline static void mark_empty (value_type &p) {p = NULL;}
  inline static bool is_empty (value_type p) {return !p;}

  /* Nothing is deletable.  Everything is insertable.  */
  inline static bool is_deleted (value_type) { return false; }
  inline static void mark_deleted (value_type) { gcc_unreachable (); }
};

/* Global state pertinent to the current function.  Exposed through the
   cp_function_chain / current_function_* macros defined further on.  */

struct GTY(()) language_function {
  struct c_language_function base;

  tree x_cdtor_label;		/* See cdtor_label.  */
  tree x_current_class_ptr;	/* See current_class_ptr.  */
  tree x_current_class_ref;	/* See current_class_ref.  */
  tree x_eh_spec_block;		/* See current_eh_spec_block.  */
  tree x_in_charge_parm;	/* See current_in_charge_parm.  */
  tree x_vtt_parm;		/* See current_vtt_parm.  */
  tree x_return_value;		/* See current_function_return_value.  */
  tree x_auto_return_pattern;	/* See current_function_auto_return_pattern.  */

  BOOL_BITFIELD returns_value : 1;
  BOOL_BITFIELD returns_null : 1;
  BOOL_BITFIELD returns_abnormally : 1;
  BOOL_BITFIELD infinite_loop: 1;
  BOOL_BITFIELD x_in_function_try_handler : 1;
  BOOL_BITFIELD x_in_base_initializer : 1;

  /* True if this function can throw an exception.  */
  BOOL_BITFIELD can_throw : 1;

  BOOL_BITFIELD invalid_constexpr : 1;

  hash_table<named_label_hash> *x_named_labels;

  cp_binding_level *bindings;

  vec<tree, va_gc> *x_local_names;

  /* Tracking possibly infinite loops.  This is a vec<tree> only because
     vec<bool> doesn't work with gtype.  */
  vec<tree, va_gc> *infinite_loops;

  hash_table<cxx_int_tree_map_hasher> *extern_decl_map;
};

/* The current C++-specific per-function global variables.
*/
#define cp_function_chain (cfun->language)

/* In a constructor destructor, the point at which all derived class
   destroying/construction has been done.  I.e., just before a
   constructor returns, or before any base class destroying will be done
   in a destructor.  */
#define cdtor_label cp_function_chain->x_cdtor_label

/* When we're processing a member function, current_class_ptr is the
   PARM_DECL for the `this' pointer.  The current_class_ref is an
   expression for `*this'.  Falls back to the saved scope's copy when
   there is no current function.  */
#define current_class_ptr			\
  (*(cfun && cp_function_chain			\
     ? &cp_function_chain->x_current_class_ptr	\
     : &scope_chain->x_current_class_ptr))
#define current_class_ref			\
  (*(cfun && cp_function_chain			\
     ? &cp_function_chain->x_current_class_ref	\
     : &scope_chain->x_current_class_ref))

/* The EH_SPEC_BLOCK for the exception-specifiers for the current
   function, if any.  */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block

/* The `__in_chrg' parameter for the current function.  Only used for
   constructors and destructors.  */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm

/* The `__vtt_parm' parameter for the current function.  Only used for
   constructors and destructors.  */
#define current_vtt_parm cp_function_chain->x_vtt_parm

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */
#define current_function_returns_value cp_function_chain->returns_value

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */
#define current_function_returns_null cp_function_chain->returns_null

/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */
#define current_function_returns_abnormally \
  cp_function_chain->returns_abnormally

/* Set to 0 at beginning of a function definition, set to 1 if we see an
   obvious infinite loop.  This can have false positives and false
   negatives, so it should only be used as a heuristic.  */
#define current_function_infinite_loop cp_function_chain->infinite_loop

/* Nonzero if we are processing a base initializer.  Zero elsewhere.  */
#define in_base_initializer cp_function_chain->x_in_base_initializer

#define in_function_try_handler cp_function_chain->x_in_function_try_handler

/* Expression always returned from function, or error_mark_node
   otherwise, for use by the automatic named return value optimization.  */
#define current_function_return_value \
  (cp_function_chain->x_return_value)

/* A type involving 'auto' to be used for return type deduction.  */
#define current_function_auto_return_pattern \
  (cp_function_chain->x_auto_return_pattern)

/* In parser.c.  */
extern tree cp_literal_operator_id (const char *);

/* TRUE if a tree code represents a statement.  */
extern bool statement_code_p[MAX_TREE_CODES];

#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]

enum languages { lang_c, lang_cplusplus };

/* Macros to make error reporting functions' lives easier.  */
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
  (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))

/* Nonzero if NODE has no name for linkage purposes.  */
#define TYPE_UNNAMED_P(NODE) \
  (OVERLOAD_TYPE_P (NODE) && anon_aggrname_p (TYPE_LINKAGE_IDENTIFIER (NODE)))

/* The _DECL for this _TYPE.  */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))

/* Nonzero if T is a type that could resolve to any kind of concrete type
   at instantiation time.  */
#define WILDCARD_TYPE_P(T)				\
  (TREE_CODE (T) == TEMPLATE_TYPE_PARM		\
   || TREE_CODE (T) == TYPENAME_TYPE		\
   || TREE_CODE (T) == TYPEOF_TYPE		\
   || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM	\
   || TREE_CODE (T) == DECLTYPE_TYPE)

/* Nonzero if T is a class (or struct or union) type.  Also nonzero
   for template type parameters, typename types, and instantiated
   template template parameters.  Keep these checks in ascending code
   order.  */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))

/* Set CLASS_TYPE_P for T to VAL.  T must be a class, struct, or
   union type.  */
#define SET_CLASS_TYPE_P(T, VAL) \
  (TYPE_LANG_FLAG_5 (RECORD_OR_UNION_CHECK (T)) = (VAL))

/* Nonzero if T is a class type.  Zero for template type parameters,
   typename types, and so forth.  */
#define CLASS_TYPE_P(T) \
  (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))

/* Nonzero if T is a class type but not a union.  */
#define NON_UNION_CLASS_TYPE_P(T) \
  (TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T))

/* Keep these checks in ascending code order.  */
#define RECORD_OR_UNION_CODE_P(T)	\
  ((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define OVERLOAD_TYPE_P(T) \
  (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)

/* True if this type is dependent.  This predicate is only valid if
   TYPE_DEPENDENT_P_VALID is true.  */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)

/* True if dependent_type_p has been called for this type, with the
   result that TYPE_DEPENDENT_P is valid.  */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)

/* Nonzero if this type is const-qualified.  */
#define CP_TYPE_CONST_P(NODE)				\
  ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)

/* Nonzero if this type is volatile-qualified.  */
#define CP_TYPE_VOLATILE_P(NODE)			\
  ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)

/* Nonzero if this type is restrict-qualified.  */
#define CP_TYPE_RESTRICT_P(NODE)			\
  ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)

/* Nonzero if this type is const-qualified, but not
   volatile-qualified.  Other qualifiers are ignored.  This
   macro is used to test whether or not it is OK to bind an rvalue
   to a reference.  */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE)				\
  ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE))	\
   == TYPE_QUAL_CONST)

#define FUNCTION_ARG_CHAIN(NODE) \
  TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))

/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
   which refers to a user-written parameter.  */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
  skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))

/* Similarly, but for DECL_ARGUMENTS.  */
#define FUNCTION_FIRST_USER_PARM(NODE) \
  skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))

/* Nonzero iff TYPE is derived from PARENT.  Ignores accessibility and
   ambiguity issues.  */
#define DERIVED_FROM_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)

/* Gives the visibility specification for a class type.  */
#define CLASSTYPE_VISIBILITY(TYPE)		\
  DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE)	\
  DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))

/* A simple (purpose, value) pair of trees; used e.g. for the vcall
   index vector in lang_type below.  */
struct GTY (()) tree_pair_s {
  tree purpose;
  tree value;
};
typedef tree_pair_s *tree_pair_p;

/* This structure provides additional information above and beyond
   what is provided in the ordinary tree_type.  In the past, we used it
   for the types of class types, template parameters types, typename
   types, and so forth.  However, there can be many (tens to hundreds
   of thousands) of template parameter types in a compilation, and
   there's no need for this additional information in that case.
   Therefore, we now use this data structure only for class types.

   In the past, it was thought that there would be relatively few
   class types.  However, in the presence of heavy use of templates,
   many (i.e., thousands) of classes can easily be generated.
   Therefore, we should endeavor to keep the size of this structure to
   a minimum.
*/
struct GTY(()) lang_type
{
  unsigned char align;

  /* Flag bits; the accessor macros below (TYPE_HAS_*, CLASSTYPE_*)
     document the meaning of each flag they expose.  */
  unsigned has_type_conversion : 1;
  unsigned has_copy_ctor : 1;
  unsigned has_default_ctor : 1;
  unsigned const_needs_init : 1;
  unsigned ref_needs_init : 1;
  unsigned has_const_copy_assign : 1;
  unsigned use_template : 2;

  unsigned has_mutable : 1;
  unsigned com_interface : 1;
  unsigned non_pod_class : 1;
  unsigned nearly_empty_p : 1;
  unsigned user_align : 1;
  unsigned has_copy_assign : 1;
  unsigned has_new : 1;
  unsigned has_array_new : 1;

  unsigned gets_delete : 2;
  unsigned interface_only : 1;
  unsigned interface_unknown : 1;
  unsigned contains_empty_class_p : 1;
  unsigned anon_aggr : 1;
  unsigned non_zero_init : 1;
  unsigned empty_p : 1;
  /* 32 bits allocated.  */
  unsigned vec_new_uses_cookie : 1;
  unsigned declared_class : 1;
  unsigned diamond_shaped : 1;
  unsigned repeated_base : 1;
  unsigned being_defined : 1;
  unsigned debug_requested : 1;
  unsigned fields_readonly : 1;
  unsigned ptrmemfunc_flag : 1;

  unsigned was_anonymous : 1;
  unsigned lazy_default_ctor : 1;
  unsigned lazy_copy_ctor : 1;
  unsigned lazy_copy_assign : 1;
  unsigned lazy_destructor : 1;
  unsigned has_const_copy_ctor : 1;
  unsigned has_complex_copy_ctor : 1;
  unsigned has_complex_copy_assign : 1;

  unsigned non_aggregate : 1;
  unsigned has_complex_dflt : 1;
  unsigned has_list_ctor : 1;
  unsigned non_std_layout : 1;
  unsigned is_literal : 1;
  unsigned lazy_move_ctor : 1;
  unsigned lazy_move_assign : 1;
  unsigned has_complex_move_ctor : 1;

  unsigned has_complex_move_assign : 1;
  unsigned has_constexpr_ctor : 1;
  unsigned unique_obj_representations : 1;
  unsigned unique_obj_representations_set : 1;

  /* When adding a flag here, consider whether or not it ought to
     apply to a template instance if it applies to the template.  If
     so, make sure to copy it in instantiate_class_template!  */

  /* There are some bits left to fill out a 32-bit word.  Keep track
     of this by updating the size of this bitfield whenever you add or
     remove a flag.  */
  unsigned dummy : 4;

  tree primary_base;		/* See CLASSTYPE_PRIMARY_BINFO.  */
  vec<tree_pair_s, va_gc> *vcall_indices;
  tree vtables;
  tree typeinfo_var;
  vec<tree, va_gc> *vbases;	/* See CLASSTYPE_VBASECLASSES.  */
  binding_table nested_udts;	/* See CLASSTYPE_NESTED_UTDS.  */
  tree as_base;			/* See CLASSTYPE_AS_BASE.  */
  vec<tree, va_gc> *pure_virtuals;	/* See CLASSTYPE_PURE_VIRTUALS.  */
  tree friend_classes;		/* See CLASSTYPE_FRIEND_CLASSES.  */
  vec<tree, va_gc> * GTY((reorder ("resort_type_member_vec"))) members;
  tree key_method;		/* See CLASSTYPE_KEY_METHOD.  */
  tree decl_list;		/* See CLASSTYPE_DECL_LIST.  */
  tree befriending_classes;	/* See CLASSTYPE_BEFRIENDING_CLASSES.  */

  /* In a RECORD_TYPE, information specific to Objective-C++, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;

  /* FIXME reuse another field?  */
  tree lambda_expr;		/* See CLASSTYPE_LAMBDA_EXPR.  */
};

/* We used to have a variant type for lang_type.  Keep the name of the
   checking accessor for the sole survivor.  */
#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE))

/* Nonzero for _CLASSTYPE means that operator delete is defined.  */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)

/* Nonzero if `new NODE[x]' should cause the allocation of extra
   storage to indicate how many array elements are in use.  */
#define TYPE_VEC_NEW_USES_COOKIE(NODE)			\
  (CLASS_TYPE_P (NODE)					\
   && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)

/* Nonzero means that this _CLASSTYPE node defines ways of converting
   itself to other types.  */
#define TYPE_HAS_CONVERSION(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_type_conversion)

/* Nonzero means that NODE (a class type) has a default constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)

/* Nonzero means that NODE (a class type) has a copy constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)

/* Nonzero means that NODE (a class type) has a move constructor --
   but that it has not yet been declared.
*/
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)

/* Nonzero means that NODE (a class type) has a copy assignment operator
   -- but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)

/* Nonzero means that NODE (a class type) has a move assignment operator
   -- but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)

/* Nonzero means that NODE (a class type) has a destructor -- but that
   it has not yet been declared.  */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)

/* Nonzero means that NODE (a class type) is final */
#define CLASSTYPE_FINAL(NODE) \
  TYPE_FINAL_P (NODE)

/* Nonzero means that this _CLASSTYPE node overloads operator=(X&).  */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)

/* True iff the class type NODE has an "operator =" whose parameter
   has a parameter of type "const X&".  */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_assign)

/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor.  */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)

/* Nonzero if this class has an X(initializer_list<T>) constructor.  */
#define TYPE_HAS_LIST_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)

/* Nonzero if this class has a constexpr constructor other than a copy/move
   constructor.  Note that a class can have constexpr constructors for
   static initialization even if it isn't a literal class.  */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)

/* Nonzero if this class defines an overloaded operator new.  (An
   operator new [] doesn't count.)  */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_new)

/* Nonzero if this class defines an overloaded operator new[].  */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)

/* Nonzero means that this type is being defined.  I.e., the left brace
   starting the definition of this type has been seen.  */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)

/* Nonzero means that this type is either complete or being defined, so we
   can do lookup in it.  */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
  (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))

/* Mark bits for repeated base checks.  */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))

/* Nonzero if the class NODE has multiple paths to the same (virtual)
   base object.  */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)

/* Nonzero if the class NODE has multiple instances of the same base
   type.  */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)

/* The member function with which the vtable will be emitted:
   the first noninline non-pure-virtual member function.  NULL_TREE
   if there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)

/* Vector of members.  During definition, it is unordered and only
   member functions are present.  After completion it is sorted and
   contains both member functions and non-functions.  STAT_HACK is
   involved to preserve the one slot per name invariant.  */
#define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members)

/* For class templates, this is a TREE_LIST of all member data,
   functions, types, and friends in the order of declaration.
   The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
   and the RECORD_TYPE for the class template otherwise.  */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)

/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE.  These
   are the constructors that take an in-charge parameter.  */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
  (get_class_binding_direct (NODE, ctor_identifier))

/* A FUNCTION_DECL for the destructor for NODE.  This is the
   destructors that take an in-charge parameter.  If
   CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
   until the destructor is created with lazily_declare_fn.  */
#define CLASSTYPE_DESTRUCTOR(NODE) \
  (get_class_binding_direct (NODE, dtor_identifier))

/* A dictionary of the nested user-defined-types (class-types, or
   enums) found within this class.  This table includes nested member
   class templates.  */
#define CLASSTYPE_NESTED_UTDS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)

/* Nonzero if NODE has a primary base class, i.e., a base class with
   which it shares the virtual function table pointer.  */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
  (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)

/* If non-NULL, this is the binfo for the primary base class, i.e.,
   the base class which contains the virtual function table pointer
   for this class.  */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->primary_base)

/* A vector of BINFOs for the direct and indirect virtual base classes
   that this type uses in a post-order depth-first left-to-right
   order.  (In other words, these bases appear in the order that they
   should be initialized.)  */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)

/* The type corresponding to NODE when NODE is used as a base class,
   i.e., NODE without virtual base classes or tail padding.  */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)

/* True iff NODE is the CLASSTYPE_AS_BASE version of some type.  */
#define IS_FAKE_BASE_TYPE(NODE)					\
  (TREE_CODE (NODE) == RECORD_TYPE				\
   && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE))	\
   && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))

/* These are the size and alignment of the type without its virtual
   base classes, for when we use this type as a base itself.  */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))

/* The alignment of NODE, without its virtual bases, in bytes.  */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
  (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)

/* A vec<tree> of virtual functions which cannot be inherited by
   derived classes.  When deriving from this type, the derived
   class must provide its own definition for each of these functions.  */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)

/* Nonzero means that this type is an abstract class type.  */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
  (CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))

/* Nonzero means that this type has an X() constructor.  */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor)

/* Nonzero means that this type contains a mutable member.  */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))

/* Nonzero means that this class type is not POD for the purpose of layout
   (as defined in the ABI).  This is different from the language's POD.  */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)

/* Nonzero means that this class type is a non-standard-layout class.
*/ #define CLASSTYPE_NON_STD_LAYOUT(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout) /* Nonzero means that this class type does have unique object representations. */ #define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations) /* Nonzero means that this class type has CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed. */ #define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set) /* Nonzero means that this class contains pod types whose default initialization is not a zero initialization (namely, pointers to data members). */ #define CLASSTYPE_NON_ZERO_INIT_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init) /* Nonzero if this class is "empty" in the sense of the C++ ABI. */ #define CLASSTYPE_EMPTY_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->empty_p) /* Nonzero if this class is "nearly empty", i.e., contains only a virtual function table pointer. */ #define CLASSTYPE_NEARLY_EMPTY_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p) /* Nonzero if this class contains an empty subobject. */ #define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p) /* A list of class types of which this type is a friend. The TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the case of a template friend. */ #define CLASSTYPE_FRIEND_CLASSES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes) /* A list of the classes which grant friendship to this class. */ #define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes) /* The associated LAMBDA_EXPR that made this class. */ #define CLASSTYPE_LAMBDA_EXPR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr) /* The extra mangling scope for this closure type. */ #define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \ (LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE))) /* Say whether this node was declared as a "class" or a "struct". 
*/ #define CLASSTYPE_DECLARED_CLASS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->declared_class) /* Nonzero if this class has const members which have no specified initialization. */ #define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \ (TYPE_LANG_SPECIFIC (NODE) \ ? LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init : 0) #define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \ (LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init = (VALUE)) /* Nonzero if this class has ref members which have no specified initialization. */ #define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \ (TYPE_LANG_SPECIFIC (NODE) \ ? LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init : 0) #define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \ (LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init = (VALUE)) /* Nonzero if this class is included from a header file which employs `#pragma interface', and it is not included in its implementation file. */ #define CLASSTYPE_INTERFACE_ONLY(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_only) /* True if we have already determined whether or not vtables, VTTs, typeinfo, and other similar per-class data should be emitted in this translation unit. This flag does not indicate whether or not these items should be emitted; it only indicates that we know one way or the other. */ #define CLASSTYPE_INTERFACE_KNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0) /* The opposite of CLASSTYPE_INTERFACE_KNOWN. */ #define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown) #define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X)) #define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1) #define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0) /* Nonzero if a _DECL node requires us to output debug info for this class. 
*/ #define CLASSTYPE_DEBUG_REQUESTED(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested) /* Additional macros for inheritance information. */ /* Nonzero means that this class is on a path leading to a new vtable. */ #define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE) /* Nonzero means B (a BINFO) has its own vtable. Any copies will not have this flag set. */ #define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B)) /* Compare a BINFO_TYPE with another type for equality. For a binfo, this is functionally equivalent to using same_type_p, but measurably faster. At least one of the arguments must be a BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If BINFO_TYPE(T) ever stops being the main variant of the class the binfo is for, this macro must change. */ #define SAME_BINFO_TYPE_P(A, B) ((A) == (B)) /* Any subobject that needs a new vtable must have a vptr and must not be a non-virtual primary base (since it would then use the vtable from a derived class and never become non-primary.) */ #define SET_BINFO_NEW_VTABLE_MARKED(B) \ (BINFO_NEW_VTABLE_MARKED (B) = 1, \ gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \ gcc_assert (TYPE_VFIELD (BINFO_TYPE (B)))) /* Nonzero if this binfo is for a dependent base - one that should not be searched. */ #define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE) /* Nonzero if this binfo has lost its primary base binfo (because that is a nearly-empty virtual base that has been taken by some other base in the complete hierarchy). */ #define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE) /* Nonzero if this BINFO is a primary base class. */ #define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE) /* A vec<tree_pair_s> of the vcall indices associated with the class NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual function. The VALUE is the index into the virtual table where the vcall offset for that function is stored, when NODE is a virtual base. 
*/ #define CLASSTYPE_VCALL_INDICES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices) /* The various vtables for the class NODE. The primary vtable will be first, followed by the construction vtables and VTT, if any. */ #define CLASSTYPE_VTABLES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vtables) /* The std::type_info variable representing this class, or NULL if no such variable has been created. This field is only set for the TYPE_MAIN_VARIANT of the class. */ #define CLASSTYPE_TYPEINFO_VAR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var) /* Accessor macros for the BINFO_VIRTUALS list. */ /* The number of bytes by which to adjust the `this' pointer when calling this virtual function. Subtract this value from the this pointer. Always non-NULL, might be constant zero though. */ #define BV_DELTA(NODE) (TREE_PURPOSE (NODE)) /* If non-NULL, the vtable index at which to find the vcall offset when calling this virtual function. Add the value at that vtable index to the this pointer. */ #define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE)) /* The function to call. */ #define BV_FN(NODE) (TREE_VALUE (NODE)) /* Whether or not this entry is for a lost primary virtual base. */ #define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE will be NULL_TREE to indicate a throw specification of `()', or no exceptions allowed. For a noexcept specification, TREE_VALUE is NULL_TREE and TREE_PURPOSE is the constant-expression. For a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT (for templates) or an OVERLOAD list of functions (for implicitly declared functions). */ #define TYPE_RAISES_EXCEPTIONS(NODE) \ TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()' or noexcept(true). 
*/ #define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the case for things declared noexcept(true) and, with -fnothrow-opt, for throw() functions. */ #define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE) /* The binding level associated with the namespace. */ #define NAMESPACE_LEVEL(NODE) \ (LANG_DECL_NS_CHECK (NODE)->level) /* Discriminator values for lang_decl. */ enum lang_decl_selector { lds_min, lds_fn, lds_ns, lds_parm, lds_decomp }; /* Flags shared by all forms of DECL_LANG_SPECIFIC. Some of the flags live here only to make lang_decl_min/fn smaller. Do not make this struct larger than 32 bits; instead, make sel smaller. */ struct GTY(()) lang_decl_base { /* Larger than necessary for faster access. */ ENUM_BITFIELD(lang_decl_selector) selector : 16; ENUM_BITFIELD(languages) language : 1; unsigned use_template : 2; unsigned not_really_extern : 1; /* var or fn */ unsigned initialized_in_class : 1; /* var or fn */ unsigned repo_available_p : 1; /* var or fn */ unsigned threadprivate_or_deleted_p : 1; /* var or fn */ unsigned anticipated_p : 1; /* fn, type or template */ /* anticipated_p reused as DECL_OMP_PRIVATIZED_MEMBER in var */ unsigned friend_or_tls : 1; /* var, fn, type or template */ unsigned unknown_bound_p : 1; /* var */ unsigned odr_used : 1; /* var or fn */ unsigned u2sel : 1; unsigned concept_p : 1; /* applies to vars and functions */ unsigned var_declared_inline_p : 1; /* var */ unsigned dependent_init_p : 1; /* var */ /* 1 spare bit */ }; /* True for DECL codes which have template info and access. */ #define LANG_DECL_HAS_MIN(NODE) \ (VAR_OR_FUNCTION_DECL_P (NODE) \ || TREE_CODE (NODE) == FIELD_DECL \ || TREE_CODE (NODE) == CONST_DECL \ || TREE_CODE (NODE) == TYPE_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL \ || TREE_CODE (NODE) == USING_DECL) /* DECL_LANG_SPECIFIC for the above codes. 
*/ struct GTY(()) lang_decl_min { struct lang_decl_base base; /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is THUNK_ALIAS. In a FUNCTION_DECL for which DECL_THUNK_P does not hold, VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is DECL_TEMPLATE_INFO. */ tree template_info; union lang_decl_u2 { /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is THUNK_VIRTUAL_OFFSET. In a VAR_DECL for which DECL_HAS_VALUE_EXPR_P holds, this is DECL_CAPTURED_VARIABLE. Otherwise this is DECL_ACCESS. */ tree GTY ((tag ("0"))) access; /* For TREE_STATIC VAR_DECL in function, this is DECL_DISCRIMINATOR. */ int GTY ((tag ("1"))) discriminator; } GTY ((desc ("%0.u.base.u2sel"))) u2; }; /* Additional DECL_LANG_SPECIFIC information for functions. */ struct GTY(()) lang_decl_fn { struct lang_decl_min min; /* In an overloaded operator, this is the compressed operator code. */ unsigned ovl_op_code : 6; unsigned global_ctor_p : 1; unsigned global_dtor_p : 1; unsigned static_function : 1; unsigned pure_virtual : 1; unsigned defaulted_p : 1; unsigned has_in_charge_parm_p : 1; unsigned has_vtt_parm_p : 1; unsigned pending_inline_p : 1; unsigned nonconverting : 1; unsigned thunk_p : 1; unsigned this_thunk_p : 1; unsigned hidden_friend_p : 1; unsigned omp_declare_reduction_p : 1; unsigned spare : 13; /* 32-bits padding on 64-bit host. */ /* For a non-thunk function decl, this is a tree list of friendly classes. For a thunk function decl, it is the thunked to function decl. */ tree befriending_classes; /* For a non-virtual FUNCTION_DECL, this is DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both this pointer and result pointer adjusting thunks are chained here. This pointer thunks to return pointer thunks will be chained on the return pointer thunk. */ tree context; union lang_decl_u5 { /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is DECL_CLONED_FUNCTION. 
*/ tree GTY ((tag ("0"))) cloned_function; /* In a FUNCTION_DECL for which THUNK_P holds this is the THUNK_FIXED_OFFSET. */ HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset; } GTY ((desc ("%1.thunk_p"))) u5; union lang_decl_u3 { struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info; struct language_function * GTY ((tag ("0"))) saved_language_function; } GTY ((desc ("%1.pending_inline_p"))) u; }; /* DECL_LANG_SPECIFIC for namespaces. */ struct GTY(()) lang_decl_ns { struct lang_decl_base base; cp_binding_level *level; /* using directives and inline children. These need to be va_gc, because of PCH. */ vec<tree, va_gc> *usings; vec<tree, va_gc> *inlinees; /* Hash table of bound decls. It'd be nice to have this inline, but as the hash_map has a dtor, we can't then put this struct into a union (until moving to c++11). */ hash_table<named_decl_hash> *bindings; }; /* DECL_LANG_SPECIFIC for parameters. */ struct GTY(()) lang_decl_parm { struct lang_decl_base base; int level; int index; }; /* Additional DECL_LANG_SPECIFIC information for structured bindings. */ struct GTY(()) lang_decl_decomp { struct lang_decl_min min; /* The artificial underlying "e" variable of the structured binding variable. */ tree base; }; /* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a union rather than a struct containing a union as its only field, but tree.h declares it as a struct. */ struct GTY(()) lang_decl { union GTY((desc ("%h.base.selector"))) lang_decl_u { /* Nothing of only the base type exists. */ struct lang_decl_base GTY ((default)) base; struct lang_decl_min GTY((tag ("lds_min"))) min; struct lang_decl_fn GTY ((tag ("lds_fn"))) fn; struct lang_decl_ns GTY((tag ("lds_ns"))) ns; struct lang_decl_parm GTY((tag ("lds_parm"))) parm; struct lang_decl_decomp GTY((tag ("lds_decomp"))) decomp; } u; }; /* Looks through a template (if present) to find what it declares. */ #define STRIP_TEMPLATE(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL ? 
DECL_TEMPLATE_RESULT (NODE) : NODE) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define LANG_DECL_MIN_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!LANG_DECL_HAS_MIN (NODE)) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.min; }) /* We want to be able to check DECL_CONSTRUCTOR_P and such on a function template, not just on a FUNCTION_DECL. So when looking for things in lang_decl_fn, look down through a TEMPLATE_DECL into its result. */ #define LANG_DECL_FN_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \ if (!DECL_DECLARES_FUNCTION_P (NODE) \ || lt->u.base.selector != lds_fn) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.fn; }) #define LANG_DECL_NS_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (TREE_CODE (NODE) != NAMESPACE_DECL \ || lt->u.base.selector != lds_ns) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.ns; }) #define LANG_DECL_PARM_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (TREE_CODE (NODE) != PARM_DECL \ || lt->u.base.selector != lds_parm) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.parm; }) #define LANG_DECL_DECOMP_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!VAR_P (NODE) \ || lt->u.base.selector != lds_decomp) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.decomp; }) #define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.min.u2; }) #else #define LANG_DECL_MIN_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.min) #define LANG_DECL_FN_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn) #define LANG_DECL_NS_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.ns) 
#define LANG_DECL_PARM_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.parm) #define LANG_DECL_DECOMP_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.decomp) #define LANG_DECL_U2_CHECK(NODE, TF) \ (&DECL_LANG_SPECIFIC (NODE)->u.min.u2) #endif /* ENABLE_TREE_CHECKING */ /* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the declaration. Some entities (like a member function in a local class, or a local variable) do not have linkage at all, and this macro should not be used in those cases. Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was created by language-independent code, and has C linkage. Most VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */ #define DECL_LANGUAGE(NODE) \ (DECL_LANG_SPECIFIC (NODE) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.language \ : (TREE_CODE (NODE) == FUNCTION_DECL \ ? lang_c : lang_cplusplus)) /* Set the language linkage for NODE to LANGUAGE. */ #define SET_DECL_LANGUAGE(NODE, LANGUAGE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE)) /* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function is a constructor. */ #define DECL_CONSTRUCTOR_P(NODE) \ IDENTIFIER_CTOR_P (DECL_NAME (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete object. */ #define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == complete_ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base object. */ #define DECL_BASE_CONSTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == base_ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the specialized in-charge constructor or the specialized not-in-charge constructor. */ #define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. 
*/ #define DECL_COPY_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0) /* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */ #define DECL_MOVE_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE)) /* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL) is a destructor. */ #define DECL_DESTRUCTOR_P(NODE) \ IDENTIFIER_DTOR_P (DECL_NAME (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the specialized in-charge constructor, in-charge deleting constructor, or the base destructor. */ #define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete object. */ #define DECL_COMPLETE_DESTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == complete_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base object. */ #define DECL_BASE_DESTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == base_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete object that deletes the object after it has been destroyed. */ #define DECL_DELETING_DESTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == deleting_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or destructor. */ #define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true)) /* If DECL_CLONED_FUNCTION_P holds, this is the function that was cloned. */ #define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false)) /* Perform an action for each clone of FN, if FN is a function with clones. This macro should be used like: FOR_EACH_CLONE (clone, fn) { ... } */ #define FOR_EACH_CLONE(CLONE, FN) \ if (!(TREE_CODE (FN) == FUNCTION_DECL \ && (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \ || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))))\ ; \ else \ for (CLONE = DECL_CHAIN (FN); \ CLONE && DECL_CLONED_FUNCTION_P (CLONE); \ CLONE = DECL_CHAIN (CLONE)) /* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. 
*/ #define DECL_DISCRIMINATOR_P(NODE) \ (VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE)) /* Discriminator for name mangling. */ #define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator) /* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */ #define DECL_DISCRIMINATOR_SET_P(NODE) \ (DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1) /* The index of a user-declared parameter in its function, starting at 1. All artificial parameters will have index 0. */ #define DECL_PARM_INDEX(NODE) \ (LANG_DECL_PARM_CHECK (NODE)->index) /* The level of a user-declared parameter in its function, starting at 1. A parameter of the function will have level 1; a parameter of the first nested function declarator (i.e. t in void f (void (*p)(T t))) will have level 2. */ #define DECL_PARM_LEVEL(NODE) \ (LANG_DECL_PARM_CHECK (NODE)->level) /* Nonzero if the VTT parm has been added to NODE. */ #define DECL_HAS_VTT_PARM_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p) /* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is required. */ #define DECL_NEEDS_VTT_PARM_P(NODE) \ (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \ && (DECL_BASE_CONSTRUCTOR_P (NODE) \ || DECL_BASE_DESTRUCTOR_P (NODE))) /* Nonzero if NODE is a user-defined conversion operator. */ #define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE)) /* The type to which conversion operator FN converts to. */ #define DECL_CONV_FN_TYPE(FN) \ TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN))) /* Nonzero if NODE, a static data member, was declared in its class as an array of unknown bound. */ #define VAR_HAD_UNKNOWN_BOUND(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.unknown_bound_p \ : false) #define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.unknown_bound_p = true) /* True iff decl NODE is for an overloaded operator. 
*/ #define DECL_OVERLOADED_OPERATOR_P(NODE) \ IDENTIFIER_ANY_OP_P (DECL_NAME (NODE)) /* Nonzero if NODE is an assignment operator (including += and such). */ #define DECL_ASSIGNMENT_OPERATOR_P(NODE) \ IDENTIFIER_ASSIGN_OP_P (DECL_NAME (NODE)) /* NODE is a function_decl for an overloaded operator. Return its compressed (raw) operator code. Note that this is not a TREE_CODE. */ #define DECL_OVERLOADED_OPERATOR_CODE_RAW(NODE) \ (LANG_DECL_FN_CHECK (NODE)->ovl_op_code) /* DECL is an overloaded operator. Test whether it is for TREE_CODE (a literal constant). */ #define DECL_OVERLOADED_OPERATOR_IS(DECL, CODE) \ (DECL_OVERLOADED_OPERATOR_CODE_RAW (DECL) == OVL_OP_##CODE) /* For FUNCTION_DECLs: nonzero means that this function is a constructor or a destructor with an extra in-charge parameter to control whether or not virtual bases are constructed. */ #define DECL_HAS_IN_CHARGE_PARM_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p) /* Nonzero if DECL is a declaration of __builtin_constant_p. */ #define DECL_IS_BUILTIN_CONSTANT_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \ && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P) /* Nonzero for _DECL means that this decl appears in (or will appear in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for detecting circularity in case members are multiply defined. In the case of a VAR_DECL, it is also used to determine how program storage should be allocated. */ #define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE)) /* Nonzero for a VAR_DECL means that the variable's initialization (if any) has been processed. (In general, DECL_INITIALIZED_P is !DECL_EXTERNAL, but static data members may be initialized even if not defined.) */ #define DECL_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL iff an explicit initializer was provided or a non-trivial constructor is called. 
*/ #define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL that was initialized with a constant-expression. */ #define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \ (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE))) /* Nonzero if the DECL was initialized in the class definition itself, rather than outside the class. This is used for both static member VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */ #define DECL_INITIALIZED_IN_CLASS_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ ->u.base.initialized_in_class) /* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr]. Only available for decls with DECL_LANG_SPECIFIC. */ #define DECL_ODR_USED(DECL) \ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ ->u.base.odr_used) /* Nonzero for DECL means that this decl is just a friend declaration, and should not be added to the list of members for this class. */ #define DECL_FRIEND_P(NODE) \ (DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \ ->u.base.friend_or_tls) /* Nonzero if the thread-local variable was declared with __thread as opposed to thread_local. */ #define DECL_GNU_TLS_P(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ && DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls) #define SET_DECL_GNU_TLS_P(NODE) \ (retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \ DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true) /* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */ #define DECL_BEFRIENDING_CLASSES(NODE) \ (LANG_DECL_FN_CHECK (NODE)->befriending_classes) /* Nonzero for FUNCTION_DECL means that this decl is a static member function. */ #define DECL_STATIC_FUNCTION_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->static_function) /* Nonzero for FUNCTION_DECL means that this decl is a non-static member function. 
*/ #define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \ (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE) /* Nonzero for FUNCTION_DECL means that this decl is a member function (static or non-static). */ #define DECL_FUNCTION_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE)) /* Nonzero for FUNCTION_DECL means that this member function has `this' as const X *const. */ #define DECL_CONST_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for FUNCTION_DECL means that this member function has `this' as volatile X *const. */ #define DECL_VOLATILE_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for a DECL means that this member is a non-static member. */ #define DECL_NONSTATIC_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ || TREE_CODE (NODE) == FIELD_DECL) /* Nonzero for _DECL means that this member object type is mutable. */ #define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE)) /* Nonzero for _DECL means that this constructor or conversion function is non-converting. */ #define DECL_NONCONVERTING_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->nonconverting) /* Nonzero for FUNCTION_DECL means that this member function is a pure virtual function. */ #define DECL_PURE_VIRTUAL_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->pure_virtual) /* True (in a FUNCTION_DECL) if NODE is a virtual function that is an invalid overrider for a function from a base class. Once we have complained about an invalid overrider we avoid complaining about it again. */ #define DECL_INVALID_OVERRIDER_P(NODE) \ (DECL_LANG_FLAG_4 (NODE)) /* True (in a FUNCTION_DECL) if NODE is a function declared with an override virt-specifier */ #define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE)) /* The thunks associated with NODE, a FUNCTION_DECL. 
*/ #define DECL_THUNKS(NODE) \ (DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE) /* Set DECL_THUNKS. */ #define SET_DECL_THUNKS(NODE,THUNKS) \ (LANG_DECL_FN_CHECK (NODE)->context = (THUNKS)) /* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this is the constructor it inherits from. */ #define DECL_INHERITED_CTOR(NODE) \ (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \ ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE) /* And this is the base that constructor comes from. */ #define DECL_INHERITED_CTOR_BASE(NODE) \ (DECL_INHERITED_CTOR (NODE) \ ? DECL_CONTEXT (flag_new_inheriting_ctors \ ? strip_inheriting_ctors (NODE) \ : DECL_INHERITED_CTOR (NODE)) \ : NULL_TREE) /* Set the inherited base. */ #define SET_DECL_INHERITED_CTOR(NODE,INH) \ (LANG_DECL_FN_CHECK (NODE)->context = (INH)) /* Nonzero if NODE is a thunk, rather than an ordinary function. */ #define DECL_THUNK_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_LANG_SPECIFIC (NODE) \ && LANG_DECL_FN_CHECK (NODE)->thunk_p) /* Set DECL_THUNK_P for node. */ #define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \ (LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \ LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING)) /* Nonzero if NODE is a this pointer adjusting thunk. */ #define DECL_THIS_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p) /* Nonzero if NODE is a result pointer adjusting thunk. */ #define DECL_RESULT_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p) /* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */ #define DECL_NON_THUNK_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE)) /* Nonzero if NODE is `extern "C"'. */ #define DECL_EXTERN_C_P(NODE) \ (DECL_LANGUAGE (NODE) == lang_c) /* Nonzero if NODE is an `extern "C"' function. 
*/ #define DECL_EXTERN_C_FUNCTION_P(NODE) \ (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE)) /* True iff DECL is an entity with vague linkage whose definition is available in this translation unit. */ #define DECL_REPO_AVAILABLE_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p) /* True if DECL is declared 'constexpr'. */ #define DECL_DECLARED_CONSTEXPR_P(DECL) \ DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL))) // True if NODE was declared as 'concept'. The flag implies that the // declaration is constexpr, that the declaration cannot be specialized or // refined, and that the result type must be convertible to bool. #define DECL_DECLARED_CONCEPT_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.concept_p) /* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a template function. */ #define DECL_PRETTY_FUNCTION_P(NODE) \ (DECL_NAME (NODE) \ && id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__")) /* Nonzero if the variable was declared to be thread-local. We need a special C++ version of this test because the middle-end DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for templates. */ #define CP_DECL_THREAD_LOCAL_P(NODE) \ (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))) /* The _TYPE context in which this _DECL appears. This field holds the class where a virtual function instance is actually defined. */ #define DECL_CLASS_CONTEXT(NODE) \ (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE) /* For a non-member friend function, the class (if any) in which this friend was defined. For example, given: struct S { friend void f () { ... } }; the DECL_FRIEND_CONTEXT for `f' will be `S'. */ #define DECL_FRIEND_CONTEXT(NODE) \ ((DECL_DECLARES_FUNCTION_P (NODE) \ && DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \ ? LANG_DECL_FN_CHECK (NODE)->context \ : NULL_TREE) /* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. 
*/ #define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \ (LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT)) #define CP_DECL_CONTEXT(NODE) \ (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace) #define CP_TYPE_CONTEXT(NODE) \ (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace) #define FROB_CONTEXT(NODE) \ ((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE)) /* 1 iff NODE has namespace scope, including the global namespace. */ #define DECL_NAMESPACE_SCOPE_P(NODE) \ (!DECL_TEMPLATE_PARM_P (NODE) \ && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL) #define TYPE_NAMESPACE_SCOPE_P(NODE) \ (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL) #define NAMESPACE_SCOPE_P(NODE) \ ((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \ || (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE))) /* 1 iff NODE is a class member. */ #define DECL_CLASS_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE))) #define TYPE_CLASS_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE))) /* 1 iff NODE is function-local. */ #define DECL_FUNCTION_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) \ && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL) #define TYPE_FUNCTION_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL) /* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for both the primary typeinfo object and the associated NTBS name. */ #define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)) /* 1 iff VAR_DECL node NODE is virtual table or VTT. */ #define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE)) /* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */ #define FUNCTION_REF_QUALIFIED(NODE) \ TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE)) /* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. 
*/ #define FUNCTION_RVALUE_QUALIFIED(NODE) \ TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE)) /* Returns 1 iff VAR_DECL is a construction virtual table. DECL_VTABLE_OR_VTT_P will be true in this case and must be checked before using this macro. */ #define DECL_CONSTRUCTION_VTABLE_P(NODE) \ TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE)) /* 1 iff NODE is function-local, but for types. */ #define LOCAL_CLASS_P(NODE) \ (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE) /* The nesting depth of namespace, class or function. Makes is_ancestor much simpler. Only 8 bits available. */ #define SCOPE_DEPTH(NODE) \ (NAMESPACE_DECL_CHECK (NODE)->base.u.bits.address_space) /* Whether the namespace is an inline namespace. */ #define DECL_NAMESPACE_INLINE_P(NODE) \ TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE)) /* In a NAMESPACE_DECL, a vector of using directives. */ #define DECL_NAMESPACE_USING(NODE) \ (LANG_DECL_NS_CHECK (NODE)->usings) /* In a NAMESPACE_DECL, a vector of inline namespaces. */ #define DECL_NAMESPACE_INLINEES(NODE) \ (LANG_DECL_NS_CHECK (NODE)->inlinees) /* Pointer to hash_map from IDENTIFIERS to DECLS. */ #define DECL_NAMESPACE_BINDINGS(NODE) \ (LANG_DECL_NS_CHECK (NODE)->bindings) /* In a NAMESPACE_DECL, points to the original namespace if this is a namespace alias. */ #define DECL_NAMESPACE_ALIAS(NODE) \ DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE)) #define ORIGINAL_NAMESPACE(NODE) \ (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE)) /* Nonzero if NODE is the std namespace. */ #define DECL_NAMESPACE_STD_P(NODE) \ (TREE_CODE (NODE) == NAMESPACE_DECL \ && CP_DECL_CONTEXT (NODE) == global_namespace \ && DECL_NAME (NODE) == std_identifier) /* In a TREE_LIST in an attribute list, indicates that the attribute must be applied at instantiation time. 
*/
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))

/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
   was inherited from a template parameter, not explicitly indicated.  */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))

/* Lookup/insert helpers for the on-the-side shadowed-variable table
   used by DECL_SHADOWED_FOR_VAR below.  */
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);

/* Nonzero if this is a using decl for a dependent scope.  */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))

/* The scope named in a using decl.  */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))

/* The decls named by a using decl.  */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))

/* Nonzero if the using decl refers to a dependent type.  */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))

/* In a VAR_DECL, true if we have a shadowed local variable
   in the shadowed var table for this VAR_DECL.  */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
  (VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)

/* In a VAR_DECL for a variable declared in a for statement,
   this is the shadowed (local) variable.  */
#define DECL_SHADOWED_FOR_VAR(NODE) \
  (DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)

#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
  (decl_shadowed_for_var_insert (NODE, VAL))

/* In a FUNCTION_DECL, this is nonzero if this function was defined in
   the class definition.  We have saved away the text of the function,
   but have not yet processed it.  */
#define DECL_PENDING_INLINE_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->pending_inline_p)

/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
   function.  */
#define DECL_PENDING_INLINE_INFO(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)

/* Nonzero for TYPE_DECL means that it was written 'using name = type'.
*/
#define TYPE_DECL_ALIAS_P(NODE) \
  DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))

/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template.  */
#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
  DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))

/* Nonzero for a type which is an alias for another type; i.e, a type
   whose declaration was written 'using name-of-type = another-type'.  */
#define TYPE_ALIAS_P(NODE) \
  (TYPE_P (NODE) \
   && TYPE_NAME (NODE) \
   && TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
   && TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))

/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
   TEMPLATE_DECL, the entity is either a template specialization (if
   DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
   template itself.

   In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
   TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
   specialization or abstract instance.  The TREE_VALUE is the
   template arguments used to specialize the template.

   Consider:

      template <typename T> struct S { friend void f(T) {} };

   In this case, S<int>::f is, from the point of view of the compiler,
   an instantiation of a template -- but, from the point of view of
   the language, each instantiation of S results in a wholly unrelated
   global function f.  In this case, DECL_TEMPLATE_INFO for S<int>::f
   will be non-NULL, but DECL_USE_TEMPLATE will be zero.  */
#define DECL_TEMPLATE_INFO(NODE) \
  (DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
   ->u.min.template_info)

/* For a lambda capture proxy, its captured variable.  */
#define DECL_CAPTURED_VARIABLE(NODE) \
  (LANG_DECL_U2_CHECK (NODE, 0)->access)

/* For a VAR_DECL, indicates that the variable is actually a
   non-static data member of anonymous union that has been promoted to
   variable status.  */
#define DECL_ANON_UNION_VAR_P(NODE) \
  (DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))

/* Template information for a RECORD_TYPE or UNION_TYPE.
*/
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
  (TYPE_LANG_SLOT_1 (RECORD_OR_UNION_CHECK (NODE)))

/* Template information for a template template parameter.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
  (TYPE_LANG_SLOT_1 (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)))

/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
   BOUND_TEMPLATE_TEMPLATE_PARM type.  This ignores any alias
   templateness of NODE.  It'd be nice if this could unconditionally
   access the slot, rather than return NULL if given a
   non-templatable type.  */
#define TYPE_TEMPLATE_INFO(NODE) \
  (TREE_CODE (NODE) == ENUMERAL_TYPE \
   || TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \
   || RECORD_OR_UNION_TYPE_P (NODE) \
   ? TYPE_LANG_SLOT_1 (NODE) : NULL_TREE)

/* Template information (if any) for an alias type.  */
#define TYPE_ALIAS_TEMPLATE_INFO(NODE) \
  (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
   ? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
   : NULL_TREE)

/* If NODE is a type alias, this accessor returns the template info
   for the alias template (if any).  Otherwise behave as
   TYPE_TEMPLATE_INFO.  */
#define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \
  (TYPE_ALIAS_P (NODE) \
   ? TYPE_ALIAS_TEMPLATE_INFO (NODE) \
   : TYPE_TEMPLATE_INFO (NODE))

/* Set the template information for an ENUMERAL_, RECORD_, or
   UNION_TYPE to VAL.  */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
  (TREE_CODE (NODE) == ENUMERAL_TYPE \
   || (CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
   ? (TYPE_LANG_SLOT_1 (NODE) = (VAL)) \
   : (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL)))

/* Accessors for the two halves of a TEMPLATE_INFO node: the
   TEMPLATE_DECL and the argument vector respectively.  */
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)

/* For a given TREE_VEC containing a template argument list,
   this property contains the number of arguments that are not
   defaulted.
*/
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))

/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
   property.  */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
  NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)

/* With checking enabled the count must have been set; without checking,
   fall back to the full innermost-argument length when it was not.  */
#if CHECKING_P
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
  int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
  NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
  ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
  : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif

/* The list of typedefs - used in the template - that need
   access checking at template instantiation time.

   FIXME this should be associated with the TEMPLATE_DECL, not the
   TEMPLATE_INFO.  */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
  ((struct tree_template_info*)TEMPLATE_INFO_CHECK \
     (NODE))->typedefs_needing_access_checking

/* We use TREE_VECs to hold template arguments.  If there is only one
   level of template arguments, then the TREE_VEC contains the
   arguments directly.  If there is more than one level of template
   arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
   containing the template arguments for a single level.  The first
   entry in the outer TREE_VEC is the outermost level of template
   parameters; the last is the innermost.

   It is incorrect to ever form a template argument vector containing
   only one level of arguments, but which is a TREE_VEC containing as
   its only entry the TREE_VEC for that level.

   For each TREE_VEC containing the template arguments for a single
   level, it's possible to get or set the number of non defaulted
   template arguments by using the accessor macros
   GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
   SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT.  */

/* Nonzero if the template arguments is actually a vector of vectors,
   rather than just a vector.
*/
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
  (NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
   && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)

/* The depth of a template argument vector.  When called directly by
   the parser, we use a TREE_LIST rather than a TREE_VEC to represent
   template arguments.  In fact, we may even see NULL_TREE if there
   are no template arguments.  In both of those cases, there is only
   one level of template arguments.  */
#define TMPL_ARGS_DEPTH(NODE) \
  (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)

/* The LEVELth level of the template ARGS.  The outermost level of
   args is level 1, not level 0.  */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
  (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
   ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))

/* Set the LEVELth level of the template ARGS to VAL.  This macro does
   not work with single-level argument vectors.  */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
  (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))

/* Accesses the IDXth parameter in the LEVELth level of the ARGS.  */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
  (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))

/* Given a single level of template arguments in NODE, return the
   number of arguments.  */
#define NUM_TMPL_ARGS(NODE) \
  (TREE_VEC_LENGTH (NODE))

/* Returns the innermost level of template arguments in ARGS.  */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
  (get_innermost_template_args ((NODE), 1))

/* The number of levels of template parameters given by NODE.  */
#define TMPL_PARMS_DEPTH(NODE) \
  ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))

/* The TEMPLATE_DECL instantiated or specialized by NODE.  This
   TEMPLATE_DECL will be the immediate parent, not the most general
   template.  For example, in:

      template <class T> struct S { template <class U> void f(U); }

   the FUNCTION_DECL for S<int>::f<double> will have, as its
   DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
   class, this value will not be a TEMPLATE_DECL, but rather an
   IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
   any explicit template arguments provided.  For example, in:

     template <class T> struct S { friend void f<int>(int, double); }

   the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
   DECL_TI_ARGS will be {int}.

   For a FIELD_DECL with a non-static data member initializer, this value
   is the FIELD_DECL it was instantiated from.  */
#define DECL_TI_TEMPLATE(NODE)      TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))

/* The template arguments used to obtain this decl from the most
   general form of DECL_TI_TEMPLATE.  For the example given for
   DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}.  These
   are always the full set of arguments required to instantiate this
   declaration from the most general template specialized here.  */
#define DECL_TI_ARGS(NODE)          TI_ARGS (DECL_TEMPLATE_INFO (NODE))

/* The TEMPLATE_DECL associated with NODE, a class type.  Even if NODE
   will be generated from a partial specialization, the TEMPLATE_DECL
   referred to here will be the original template.  For example, given:

      template <typename T> struct S {};
      template <typename T> struct S<T*> {};

   the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>.  */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE)     TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))

/* For a template instantiation TYPE, returns the TYPE corresponding
   to the primary template.  Otherwise returns TYPE itself.  */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
  ((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
    && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
   ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
				      (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
   : (TYPE))

/* Like CLASS_TI_TEMPLATE, but also works for ENUMERAL_TYPEs.
*/
#define TYPE_TI_TEMPLATE(NODE) \
  (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))

/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE.  */
#define TYPE_TI_ARGS(NODE) \
  (TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))

#define INNERMOST_TEMPLATE_PARMS(NODE)  TREE_VALUE (NODE)

/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
   sense of [temp.mem].  */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
  (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))

/* Nonzero if the NODE corresponds to the template parameters for a
   member template, whose inline definition is being processed after
   the class definition is complete.  */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)

/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack.  */
#define DECL_PACK_P(NODE) \
  (DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))

/* Determines if NODE is an expansion of one or more parameter packs,
   e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION.  */
#define PACK_EXPANSION_P(NODE) \
  (TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
   || TREE_CODE (NODE) == EXPR_PACK_EXPANSION)

/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
   EXPR_PACK_EXPANSION.  */
#define PACK_EXPANSION_PATTERN(NODE) \
  (TREE_CODE (NODE) == TYPE_PACK_EXPANSION ? TREE_TYPE (NODE) \
   : TREE_OPERAND (NODE, 0))

/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
   EXPR_PACK_EXPANSION.  */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
  if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
    TREE_TYPE (NODE) = VALUE; \
  else \
    TREE_OPERAND (NODE, 0) = VALUE

/* The list of parameter packs used in the PACK_EXPANSION_* node. The
   TREE_VALUE of each TREE_LIST contains the parameter packs.  */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
  *(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
    ? &TREE_OPERAND (NODE, 1) \
    : &TYPE_MIN_VALUE_RAW (TYPE_PACK_EXPANSION_CHECK (NODE)))

/* Any additional template args to be applied when substituting into
   the pattern, set by tsubst_pack_expansion for partial instantiations.
   If this is a TREE_LIST, the TREE_VALUE of the first element is the
   usual template argument TREE_VEC, and the TREE_PURPOSE of later elements
   are enclosing functions that provided function parameter packs we'll
   need to map appropriately.  */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
  *(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
    ? &TYPE_MAX_VALUE_RAW (NODE) \
    : &TREE_OPERAND ((NODE), 2))

/* True iff this pack expansion is within a function context.  */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)

/* True iff this pack expansion is for sizeof....  */
#define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE)

/* True iff the wildcard can match a template parameter pack.  */
#define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE)

/* Determine if this is an argument pack.  */
#define ARGUMENT_PACK_P(NODE) \
  (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
   || TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)

/* The arguments stored in an argument pack. Arguments are stored in a
   TREE_VEC, which may have length zero.  */
#define ARGUMENT_PACK_ARGS(NODE) \
  (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \
   : TREE_OPERAND (NODE, 0))

/* Set the arguments stored in an argument pack. VALUE must be a
   TREE_VEC.  */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
  if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
    TREE_TYPE (NODE) = VALUE; \
  else \
    TREE_OPERAND (NODE, 0) = VALUE

/* Whether the argument pack is "incomplete", meaning that more
   arguments can still be deduced. Incomplete argument packs are only
   used when the user has provided an explicit template argument list
   for a variadic function template. Some of the explicit template
   arguments will be placed into the beginning of the argument pack,
   but additional arguments might still be deduced.
*/
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
  TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))

/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
   arguments used to fill this pack.  */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
  TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))

/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
   argument will be selected.  */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
  (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)

/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
   select.  */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
  (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)

/* Checked accessors for the four C++17 fold-expression tree codes.  */
#define FOLD_EXPR_CHECK(NODE) \
  TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \
	       BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)

#define BINARY_FOLD_EXPR_CHECK(NODE) \
  TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)

/* True if NODE is UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR */
#define FOLD_EXPR_P(NODE) \
  (TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \
   || TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \
   || TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \
   || TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR)

/* True when NODE is a fold over a compound assignment operator.  */
#define FOLD_EXPR_MODIFY_P(NODE) \
  TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE))

/* An INTEGER_CST containing the tree code of the folded operator.  */
#define FOLD_EXPR_OP(NODE) \
  TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0)

/* The expression containing an unexpanded parameter pack.  */
#define FOLD_EXPR_PACK(NODE) \
  TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1)

/* In a binary fold expression, the argument with no unexpanded
   parameter packs.  */
#define FOLD_EXPR_INIT(NODE) \
  TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2)

/* In a FUNCTION_DECL, the saved language-specific per-function data.
*/
#define DECL_SAVED_FUNCTION_DATA(NODE) \
  (LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
   ->u.saved_language_function)

/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference.  */
#define REFERENCE_REF_P(NODE) \
  (INDIRECT_REF_P (NODE) \
   && TREE_TYPE (TREE_OPERAND (NODE, 0)) \
   && (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
       == REFERENCE_TYPE))

/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
   reference to VLA type, because it's used for VLA capture.  */
#define REFERENCE_VLA_OK(NODE) \
  (TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))

/* Nonzero if the new/delete expression uses `::operator new'/`::operator
   delete' rather than a class-specific one.  */
#define NEW_EXPR_USE_GLOBAL(NODE) \
  TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
  TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
/* Nonzero for a `delete[]' rather than scalar `delete'.  */
#define DELETE_EXPR_USE_VEC(NODE) \
  TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))

#define CALL_OR_AGGR_INIT_CHECK(NODE) \
  TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR)

/* Indicates that this is a non-dependent COMPOUND_EXPR which will
   resolve to a function call.  */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
  TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))

/* In a CALL_EXPR appearing in a template, true if Koenig lookup
   should be performed at instantiation time.  */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))

/* True if the arguments to NODE should be evaluated in left-to-right
   order regardless of PUSH_ARGS_REVERSED.  */
#define CALL_EXPR_ORDERED_ARGS(NODE) \
  TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE))

/* True if the arguments to NODE should be evaluated in right-to-left
   order regardless of PUSH_ARGS_REVERSED.  */
#define CALL_EXPR_REVERSE_ARGS(NODE) \
  TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE))

/* True if CALL_EXPR was written as an operator expression, not a
   function call.  */
#define CALL_EXPR_OPERATOR_SYNTAX(NODE) \
  TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE))

/* Indicates whether a string literal has been parenthesized. Such
   usages are disallowed in certain circumstances.
*/
#define PAREN_STRING_LITERAL_P(NODE) \
  TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))

/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, or
   an INDIRECT_REF comes from parenthesizing a _DECL.  Currently only set
   some of the time in C++14 mode.  */
#define REF_PARENTHESIZED_P(NODE) \
  TREE_LANG_FLAG_2 (TREE_CHECK3 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF))

/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
   constructor call, rather than an ordinary function call.  */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
  TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))

/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
   the object.  */
#define AGGR_INIT_ZERO_FIRST(NODE) \
  TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))

/* Nonzero means that the call is the jump from a thunk to the
   thunked-to function.  */
#define AGGR_INIT_FROM_THUNK_P(NODE) \
  (AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag)

/* AGGR_INIT_EXPR accessors.  These are equivalent to the CALL_EXPR
   accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
   CALL_EXPR_STATIC_CHAIN).  Operands 0-2 are fixed (length, function,
   slot); the actual arguments start at operand 3.  */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
  TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
  TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)

/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
   We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
   the argument count is zero when checking is enabled.  Instead, do
   the pointer arithmetic to advance past the 3 fixed operands in a
   AGGR_INIT_EXPR.  That produces a valid pointer to just past the end of
   the operand array, even if it's not valid to dereference it.  */
#define AGGR_INIT_EXPR_ARGP(NODE) \
  (&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)

/* Abstract iterators for AGGR_INIT_EXPRs.
*/ /* Structure containing iterator state. */ struct aggr_init_expr_arg_iterator { tree t; /* the aggr_init_expr */ int n; /* argument count */ int i; /* next argument index */ }; /* Initialize the abstract argument list iterator object ITER with the arguments from AGGR_INIT_EXPR node EXP. */ inline void init_aggr_init_expr_arg_iterator (tree exp, aggr_init_expr_arg_iterator *iter) { iter->t = exp; iter->n = aggr_init_expr_nargs (exp); iter->i = 0; } /* Return the next argument from abstract argument list iterator object ITER, and advance its state. Return NULL_TREE if there are no more arguments. */ inline tree next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter) { tree result; if (iter->i >= iter->n) return NULL_TREE; result = AGGR_INIT_EXPR_ARG (iter->t, iter->i); iter->i++; return result; } /* Initialize the abstract argument list iterator object ITER, then advance past and return the first argument. Useful in for expressions, e.g. for (arg = first_aggr_init_expr_arg (exp, &iter); arg; arg = next_aggr_init_expr_arg (&iter)) */ inline tree first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter) { init_aggr_init_expr_arg_iterator (exp, iter); return next_aggr_init_expr_arg (iter); } /* Test whether there are more arguments in abstract argument list iterator ITER, without changing its state. */ inline bool more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter) { return (iter->i < iter->n); } /* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */ #define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \ for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \ (arg) = next_aggr_init_expr_arg (&(iter))) /* VEC_INIT_EXPR accessors. 
*/
/* In a VEC_INIT_EXPR, operand 0 is the target object and operand 1 the
   initializer (which may be NULL_TREE).  */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)

/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
   Only set when the current function is constexpr.  */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
  TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))

/* Indicates that a VEC_INIT_EXPR is expressing value-initialization.  */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
  TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))

/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
   exceptions.  NULL_TREE means 'true'.  */
#define MUST_NOT_THROW_COND(NODE) \
  TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)

/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
   TEMPLATE_DECL.  This macro determines whether or not a given class
   type is really a template type, as opposed to an instantiation or
   specialization of one.  */
#define CLASSTYPE_IS_TEMPLATE(NODE)  \
  (CLASSTYPE_TEMPLATE_INFO (NODE)    \
   && !CLASSTYPE_USE_TEMPLATE (NODE) \
   && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))

/* The name used by the user to name the typename type.  Typically,
   this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
   corresponding TYPE_DECL.  However, this may also be a
   TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'.  */
#define TYPENAME_TYPE_FULLNAME(NODE) \
  (TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))

/* True if a TYPENAME_TYPE was declared as an "enum".  */
#define TYPENAME_IS_ENUM_P(NODE) \
  (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))

/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
   "union".  */
#define TYPENAME_IS_CLASS_P(NODE) \
  (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))

/* True if a TYPENAME_TYPE is in the process of being resolved.
*/
#define TYPENAME_IS_RESOLVING_P(NODE) \
  (TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))

/* [class.virtual]

   A class that declares or inherits a virtual function is called a
   polymorphic class.  */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))

/* Nonzero if this class has a virtual function table pointer.
   (Virtual bases require a VTT pointer even without virtual
   functions.)  */
#define TYPE_CONTAINS_VPTR_P(NODE)		\
  (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))

/* This flag is true of a local VAR_DECL if it was declared in a for
   statement, but we are no longer in the scope of the for.  */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))

/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
   if we already emitted a warning about using it.  */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))

/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
   scope) declared in a local scope.  */
#define DECL_LOCAL_FUNCTION_P(NODE) \
  DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))

/* Nonzero if NODE is the target for genericization of 'break' stmts.  */
#define LABEL_DECL_BREAK(NODE) \
  DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))

/* Nonzero if NODE is the target for genericization of 'continue' stmts.  */
#define LABEL_DECL_CONTINUE(NODE) \
  DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))

/* Nonzero if NODE is the target for genericization of 'return' stmts in
   constructors/destructors of targetm.cxx.cdtor_returns_this targets.  */
#define LABEL_DECL_CDTOR(NODE) \
  DECL_LANG_FLAG_2 (LABEL_DECL_CHECK (NODE))

/* True if NODE was declared with auto in its return type, but it has
   started compilation and so the return type might have been changed by
   return type deduction; its declared return type should be found in
   DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern.
*/
#define FNDECL_USED_AUTO(NODE) \
  TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))

/* Nonzero if NODE is a DECL which we know about but which has not
   been explicitly declared, such as a built-in function or a friend
   declared inside a class.  In the latter case DECL_HIDDEN_FRIEND_P
   will be set.  */
#define DECL_ANTICIPATED(NODE) \
  (DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
   ->u.base.anticipated_p)

/* Is DECL NODE a hidden name?  */
#define DECL_HIDDEN_P(NODE) \
  (DECL_LANG_SPECIFIC (NODE) && TYPE_FUNCTION_OR_TEMPLATE_DECL_P (NODE) \
   && DECL_ANTICIPATED (NODE))

/* True if this is a hidden class type.  */
#define TYPE_HIDDEN_P(NODE) \
  (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
   && DECL_ANTICIPATED (TYPE_NAME (NODE)))

/* True for artificial decls added for OpenMP privatized non-static
   data members.  (Reuses the anticipated_p bit, which VAR_DECLs do
   not otherwise use.)  */
#define DECL_OMP_PRIVATIZED_MEMBER(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p)

/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
   within a class but has not been declared in the surrounding scope.
   The function is invisible except via argument dependent lookup.  */
#define DECL_HIDDEN_FRIEND_P(NODE) \
  (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)

/* Nonzero if NODE is an artificial FUNCTION_DECL for
   #pragma omp declare reduction.  */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
  (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)

/* Nonzero if DECL has been declared threadprivate by
   #pragma omp threadprivate.  */
#define CP_DECL_THREADPRIVATE_P(DECL) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)

/* Nonzero if NODE is a VAR_DECL which has been declared inline.  */
#define DECL_VAR_DECLARED_INLINE_P(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
   ? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p \
   : false)
#define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \
   = true)

/* True if NODE is a constant variable with a value-dependent initializer.  */
#define DECL_DEPENDENT_INIT_P(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
   && DECL_LANG_SPECIFIC (NODE)->u.base.dependent_init_p)
#define SET_DECL_DEPENDENT_INIT_P(NODE, X) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.dependent_init_p = (X))

/* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding
   declaration or one of VAR_DECLs for the user identifiers in it.  */
#define DECL_DECOMPOSITION_P(NODE) \
  (VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \
   ? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp \
   : false)

/* The underlying artificial VAR_DECL for structured binding.  */
#define DECL_DECOMP_BASE(NODE) \
  (LANG_DECL_DECOMP_CHECK (NODE)->base)

/* Nonzero if NODE is an inline VAR_DECL.  In C++17, static data members
   declared with constexpr specifier are implicitly inline variables.  */
#define DECL_INLINE_VAR_P(NODE) \
  (DECL_VAR_DECLARED_INLINE_P (NODE) \
   || (cxx_dialect >= cxx17 \
       && DECL_DECLARED_CONSTEXPR_P (NODE) \
       && DECL_CLASS_SCOPE_P (NODE)))

/* Nonzero if DECL was declared with '= delete'.  */
#define DECL_DELETED_FN(DECL) \
  (LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)

/* Nonzero if DECL was declared with '= default' (maybe implicitly).  */
#define DECL_DEFAULTED_FN(DECL) \
  (LANG_DECL_FN_CHECK (DECL)->defaulted_p)

/* Nonzero if DECL is explicitly defaulted in the class body.  */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
  (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))

/* Nonzero if DECL was defaulted outside the class body.
*/
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
  (DECL_DEFAULTED_FN (DECL) \
   && !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))

/* Record whether a typedef for type `int' was actually `signed int'.  */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)

/* Returns nonzero if DECL has external linkage, as specified by the
   language standard.  (This predicate may hold even when the
   corresponding entity is not actually given external linkage in the
   object file; see decl_linkage for details.)  */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
  (decl_linkage (DECL) == lk_external)

/* Keep these codes in ascending code order.  */

#define INTEGRAL_CODE_P(CODE)	\
  ((CODE) == ENUMERAL_TYPE	\
   || (CODE) == BOOLEAN_TYPE	\
   || (CODE) == INTEGER_TYPE)

/* [basic.fundamental]

   Types  bool, char, wchar_t, and the signed and unsigned integer types
   are collectively called integral types.

   Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
   types as well, which is incorrect in C++.  Keep these checks in
   ascending code order.  */
#define CP_INTEGRAL_TYPE_P(TYPE)	\
  (TREE_CODE (TYPE) == BOOLEAN_TYPE	\
   || TREE_CODE (TYPE) == INTEGER_TYPE)

/* Returns true if TYPE is an integral or enumeration name.  Keep
   these checks in ascending code order.  */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))

/* Returns true if TYPE is an integral or unscoped enumeration type.  */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
  (UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))

/* True if the class type TYPE is a literal type.  */
#define CLASSTYPE_LITERAL_P(TYPE) \
  (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)

/* [basic.fundamental]

   Integral and floating types are collectively called arithmetic
   types.

   As a GNU extension, we also accept complex types.

   Keep these checks in ascending code order.
*/
#define ARITHMETIC_TYPE_P(TYPE) \
  (CP_INTEGRAL_TYPE_P (TYPE) \
   || TREE_CODE (TYPE) == REAL_TYPE \
   || TREE_CODE (TYPE) == COMPLEX_TYPE)

/* True iff TYPE is cv decltype(nullptr).  */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)

/* [basic.types]

   Arithmetic types, enumeration types, pointer types,
   pointer-to-member types, and std::nullptr_t are collectively called
   scalar types.

   Keep these checks in ascending code order.  */
#define SCALAR_TYPE_P(TYPE)			\
  (TYPE_PTRDATAMEM_P (TYPE)			\
   || TREE_CODE (TYPE) == ENUMERAL_TYPE		\
   || ARITHMETIC_TYPE_P (TYPE)			\
   || TYPE_PTR_P (TYPE)				\
   || TYPE_PTRMEMFUNC_P (TYPE)			\
   || NULLPTR_TYPE_P (TYPE))

/* Determines whether this type is a C++0x scoped enumeration
   type. Scoped enumerations types are introduced via "enum class" or
   "enum struct", e.g.,

     enum class Color {
       Red, Green, Blue
     };

   Scoped enumeration types are different from normal (unscoped)
   enumeration types in several ways:

     - The enumerators of a scoped enumeration type are only available
       within the scope of the enumeration type and not in the
       enclosing scope. For example, the Red color can be referred to
       with "Color::Red" but not "Red".

     - Scoped enumerators and enumerations do not implicitly convert
       to integers or 'bool'.

     - The underlying type of the enum is well-defined.  */
#define SCOPED_ENUM_P(TYPE)                                             \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))

/* Determine whether this is an unscoped enumeration type.  */
#define UNSCOPED_ENUM_P(TYPE)                                           \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))

/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
   enumeration type (1) or a normal (unscoped) enumeration type
   (0).  */
#define SET_SCOPED_ENUM_P(TYPE, VAL)                    \
  (ENUM_IS_SCOPED (TYPE) = (VAL))

#define SET_OPAQUE_ENUM_P(TYPE, VAL)                    \
  (ENUM_IS_OPAQUE (TYPE) = (VAL))

/* True for an opaque-enum-declaration (one with no enumerator list).  */
#define OPAQUE_ENUM_P(TYPE)                             \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))

/* Determines whether an ENUMERAL_TYPE has an explicit
   underlying type.
*/ #define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE)) /* Returns the underlying type of the given enumeration type. The underlying type is determined in different ways, depending on the properties of the enum: - In C++0x, the underlying type can be explicitly specified, e.g., enum E1 : char { ... } // underlying type is char - In a C++0x scoped enumeration, the underlying type is int unless otherwise specified: enum class E2 { ... } // underlying type is int - Otherwise, the underlying type is determined based on the values of the enumerators. In this case, the ENUM_UNDERLYING_TYPE will not be set until after the definition of the enumeration is completed by finish_enum. */ #define ENUM_UNDERLYING_TYPE(TYPE) \ TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE)) /* [dcl.init.aggr] An aggregate is an array or a class with no user-provided constructors, no brace-or-equal-initializers for non-static data members, no private or protected non-static data members, no base classes, and no virtual functions. As an extension, we also treat vectors as aggregates. Keep these checks in ascending code order. */ #define CP_AGGREGATE_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == VECTOR_TYPE \ ||TREE_CODE (TYPE) == ARRAY_TYPE \ || (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE))) /* Nonzero for a class type means that the class type has a user-declared constructor. */ #define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE)) /* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a late-specified return type. */ #define TYPE_HAS_LATE_RETURN_TYPE(NODE) \ (TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE))) /* When appearing in an INDIRECT_REF, it means that the tree structure underneath is actually a call to a constructor. This is needed when the constructor must initialize local storage (which can be automatically destroyed), rather than allowing it to allocate space from the heap. When appearing in a SAVE_EXPR, it means that underneath is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a compound literal. When appearing in a FIELD_DECL, it means that this field has been duly initialized in its constructor. */ #define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE)) /* True if NODE is a brace-enclosed initializer. */ #define BRACE_ENCLOSED_INITIALIZER_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node) /* True if NODE is a compound-literal, i.e., a brace-enclosed initializer cast to a particular type. */ #define COMPOUND_LITERAL_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE)) #define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \ && vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\ && !TREE_HAS_CONSTRUCTOR (NODE)) /* True if NODE is a init-list used as a direct-initializer, i.e. B b{1,2}, not B b({1,2}) or B b = {1,2}. */ #define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE))) /* True if an uninitialized element in NODE should not be treated as implicitly value-initialized. Only used in constexpr evaluation. */ #define CONSTRUCTOR_NO_IMPLICIT_ZERO(NODE) \ (TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE))) /* True if this CONSTRUCTOR should not be used as a variable initializer because it was loaded from a constexpr variable with mutable fields. */ #define CONSTRUCTOR_MUTABLE_POISON(NODE) \ (TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE))) /* True if this typed CONSTRUCTOR represents C99 compound-literal syntax rather than C++11 functional cast syntax. */ #define CONSTRUCTOR_C99_COMPOUND_LITERAL(NODE) \ (TREE_LANG_FLAG_3 (CONSTRUCTOR_CHECK (NODE))) /* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with CONSTRUCTOR_PLACEHOLDER_BOUNDARY. 
*/ #define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \ (TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE))) #define DIRECT_LIST_INIT_P(NODE) \ (BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE)) /* True if NODE represents a conversion for direct-initialization in a template. Set by perform_implicit_conversion_flags. */ #define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \ (TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE))) /* True if NODE represents a dependent conversion of a non-type template argument. Set by maybe_convert_nontype_argument. */ #define IMPLICIT_CONV_EXPR_NONTYPE_ARG(NODE) \ (TREE_LANG_FLAG_1 (IMPLICIT_CONV_EXPR_CHECK (NODE))) /* Nonzero means that an object of this type can not be initialized using an initializer list. */ #define CLASSTYPE_NON_AGGREGATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate) #define TYPE_NON_AGGREGATE_CLASS(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE)) /* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign) /* Nonzero if there is a non-trivial X::X(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor) /* Nonzero if there is a non-trivial X::op=(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign) /* Nonzero if there is a non-trivial X::X(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor) /* Nonzero if there is no trivial default constructor for this class. */ #define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt) /* Nonzero if TYPE has a trivial destructor. 
From [class.dtor]: A destructor is trivial if it is an implicitly declared destructor and if: - all of the direct base classes of its class have trivial destructors, - for all of the non-static data members of its class that are of class type (or array thereof), each such class has a trivial destructor. */ #define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \ (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE)) /* Nonzero for _TYPE node means that this type does not have a trivial destructor. Therefore, destroying an object of this type will involve a call to a destructor. This can apply to objects of ARRAY_TYPE if the type of the elements needs a destructor. */ #define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \ (TYPE_LANG_FLAG_4 (NODE)) /* Nonzero for class type means that the default constructor is trivial. */ #define TYPE_HAS_TRIVIAL_DFLT(NODE) \ (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE)) /* Nonzero for class type means that copy initialization of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \ (TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE)) /* Nonzero for class type means that assignment of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \ (TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE)) /* Returns true if NODE is a pointer-to-data-member. */ #define TYPE_PTRDATAMEM_P(NODE) \ (TREE_CODE (NODE) == OFFSET_TYPE) /* Returns true if NODE is a pointer. */ #define TYPE_PTR_P(NODE) \ (TREE_CODE (NODE) == POINTER_TYPE) /* Returns true if NODE is an object type: [basic.types] An object type is a (possibly cv-qualified) type that is not a function type, not a reference type, and not a void type. Keep these checks in ascending order, for speed. */ #define TYPE_OBJ_P(NODE) \ (TREE_CODE (NODE) != REFERENCE_TYPE \ && !VOID_TYPE_P (NODE) \ && TREE_CODE (NODE) != FUNCTION_TYPE \ && TREE_CODE (NODE) != METHOD_TYPE) /* Returns true if NODE is a pointer to an object.
Keep these checks in ascending tree code order. */ #define TYPE_PTROB_P(NODE) \ (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a reference to an object. Keep these checks in ascending tree code order. */ #define TYPE_REF_OBJ_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a pointer to an object, or a pointer to void. Keep these checks in ascending tree code order. */ #define TYPE_PTROBV_P(NODE) \ (TYPE_PTR_P (NODE) \ && !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \ || TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)) /* Returns true if NODE is a pointer to function type. */ #define TYPE_PTRFN_P(NODE) \ (TYPE_PTR_P (NODE) \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a reference to function type. */ #define TYPE_REFFN_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a pointer to member function type. */ #define TYPE_PTRMEMFUNC_P(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_PTRMEMFUNC_FLAG (NODE)) #define TYPE_PTRMEMFUNC_FLAG(NODE) \ (TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE))) /* Returns true if NODE is a pointer-to-member. */ #define TYPE_PTRMEM_P(NODE) \ (TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE)) /* Returns true if NODE is a pointer or a pointer-to-member. */ #define TYPE_PTR_OR_PTRMEM_P(NODE) \ (TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE)) /* Indicates when overload resolution may resolve to a pointer to member function. [expr.unary.op]/3 */ #define PTRMEM_OK_P(NODE) \ TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF)) /* Get the POINTER_TYPE to the METHOD_TYPE associated with this pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true, before using this macro. 
*/ #define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \ (cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\ cp_type_quals (NODE))) /* As above, but can be used in places that want an lvalue at the expense of not necessarily having the correct cv-qualifiers. */ #define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \ (TREE_TYPE (TYPE_FIELDS (NODE))) /* Returns `A' for a type like `int (A::*)(double)' */ #define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \ TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* The canonical internal RECORD_TYPE from the POINTER_TYPE to METHOD_TYPE. */ #define TYPE_PTRMEMFUNC_TYPE(NODE) \ TYPE_LANG_SLOT_1 (NODE) /* For a pointer-to-member type of the form `T X::*', this is `X'. For a type like `void (X::*)() const', this type is `X', not `const X'. To get at the `const X' you have to look at the TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have type `const X*'. */ #define TYPE_PTRMEM_CLASS_TYPE(NODE) \ (TYPE_PTRDATAMEM_P (NODE) \ ? TYPE_OFFSET_BASETYPE (NODE) \ : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE)) /* For a pointer-to-member type of the form `T X::*', this is `T'. */ #define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \ (TYPE_PTRDATAMEM_P (NODE) \ ? TREE_TYPE (NODE) \ : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for `X'. */ #define PTRMEM_CST_CLASS(NODE) \ TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE))) /* For a pointer-to-member constant `X::Y' this is the _DECL for `Y'. */ #define PTRMEM_CST_MEMBER(NODE) \ (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member) /* The expression in question for a TYPEOF_TYPE. */ #define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE))) /* The type in question for an UNDERLYING_TYPE. */ #define UNDERLYING_TYPE_TYPE(NODE) \ (TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE))) /* The type in question for BASES. 
*/ #define BASES_TYPE(NODE) \ (TYPE_VALUES_RAW (BASES_CHECK (NODE))) #define BASES_DIRECT(NODE) \ TREE_LANG_FLAG_0 (BASES_CHECK (NODE)) /* The expression in question for a DECLTYPE_TYPE. */ #define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE))) /* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an id-expression or a member-access expression. When false, it was parsed as a full expression. */ #define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \ (DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag /* These flags indicate that we want different semantics from normal decltype: lambda capture just drops references, init capture uses auto semantics, lambda proxies look through implicit dereference. */ #define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \ TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_INIT_CAPTURE(NODE) \ TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \ TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_REF_CAPTURE(NODE) \ TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_EXTERN(NODE) \ DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_STATIC(NODE) \ DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a lambda capture field for an array of runtime bound. */ #define DECL_VLA_CAPTURE_P(NODE) \ DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE)) /* Nonzero for PARM_DECL node means that this is an array function parameter, i.e, a[] rather than *a. 
*/ #define DECL_ARRAY_PARAMETER_P(NODE) \ DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE)) /* Nonzero for a FIELD_DECL whose NSDMI is currently being instantiated. */ #define DECL_INSTANTIATING_NSDMI_P(NODE) \ DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a base class of the parent object, as opposed to a member field. */ #define DECL_FIELD_IS_BASE(NODE) \ DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a simple (no explicit initializer) lambda capture field, making it invisible to name lookup in unevaluated contexts. */ #define DECL_NORMAL_CAPTURE_P(NODE) \ DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE)) /* Nonzero if TYPE is an anonymous union or struct type. We have to use a flag for this because "A union for which objects or pointers are declared is not an anonymous union" [class.union]. */ #define ANON_AGGR_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr) #define SET_ANON_AGGR_TYPE_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1) /* Nonzero if TYPE is an anonymous union type. */ #define ANON_UNION_TYPE_P(NODE) \ (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE)) /* Define fields and accessors for nodes representing declared names. */ /* Nonzero if TYPE is an unnamed class with a typedef for linkage purposes. */ #define TYPE_WAS_UNNAMED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous) /* C++: all of these are overloaded! These apply only to TYPE_DECLs. */ /* The format of each node in the DECL_FRIENDLIST is as follows: The TREE_PURPOSE will be the name of a function, i.e., an IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose TREE_VALUEs are friends with the given name. */ #define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE)) #define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST)) #define FRIEND_DECLS(LIST) (TREE_VALUE (LIST)) /* The DECL_ACCESS, if non-NULL, is a TREE_LIST.
The TREE_PURPOSE of each node is a type; the TREE_VALUE is the access granted for this DECL in that type. The DECL_ACCESS is set by access declarations. For example, if a member that would normally be public in a derived class is made protected, then the derived class and the protected_access_node will appear in the DECL_ACCESS for the node. */ #define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access) /* Nonzero if the FUNCTION_DECL is a global constructor. */ #define DECL_GLOBAL_CTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->global_ctor_p) /* Nonzero if the FUNCTION_DECL is a global destructor. */ #define DECL_GLOBAL_DTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->global_dtor_p) /* Accessor macros for C++ template decl nodes. */ /* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node is an INT_CST whose TREE_INT_CST_LOW indicates the level of the template parameters, with 1 being the outermost set of template parameters. The TREE_VALUE is a vector, whose elements are the template parameters at each level. Each element in the vector is a TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a non-type parameter), or a TYPE_DECL (if the parameter is a type parameter). The TREE_PURPOSE is the default value, if any. The TEMPLATE_PARM_INDEX for the parameter is available as the DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a TYPE_DECL). FIXME: CONST_CAST_TREE is a hack that hopefully will go away after tree is converted to C++ class hierarchy. */ #define DECL_TEMPLATE_PARMS(NODE) \ ((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments #define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \ INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE)) #define DECL_NTPARMS(NODE) \ TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE)) /* For function, method, class-data templates. FIXME: CONST_CAST_TREE is a hack that hopefully will go away after tree is converted to C++ class hierarchy.
*/ #define DECL_TEMPLATE_RESULT(NODE) \ ((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result /* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS lists all instantiations and specializations of the function so that tsubst_friend_function can reassign them to another template if we find that the namespace-scope template is really a partial instantiation of a friend template. For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds all instantiations and specializations of the class type, including partial instantiations and partial specializations, so that if we explicitly specialize a partial instantiation we can walk the list in maybe_process_partial_specialization and reassign them or complain as appropriate. In both cases, the TREE_PURPOSE of each node contains the arguments used; the TREE_VALUE contains the generated variable. The template arguments are always complete. For example, given: template <class T> struct S1 { template <class U> struct S2 {}; template <class U> struct S2<U*> {}; }; the record for the partial specialization will contain, as its argument list, { {T}, {U*} }, and will be on the DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template <class U> struct S1<T>::S2'. This list is not used for other templates. */ #define DECL_TEMPLATE_INSTANTIATIONS(NODE) \ DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE)) /* For a class template, this list contains the partial specializations of this template. (Full specializations are not recorded on this list.) The TREE_PURPOSE holds the arguments used in the partial specialization (e.g., for `template <class T> struct S<T*, int>' this will be `T*, int'.) The arguments will also include any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL for the partial specialization. The TREE_TYPE is the _TYPE node for the partial specialization. This list is not used for other templates. 
*/ #define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \ DECL_SIZE (TEMPLATE_DECL_CHECK (NODE)) /* Nonzero for a DECL which is actually a template parameter. Keep these checks in ascending tree code order. */ #define DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) \ && (TREE_CODE (NODE) == CONST_DECL \ || TREE_CODE (NODE) == PARM_DECL \ || TREE_CODE (NODE) == TYPE_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL)) /* Nonzero for a raw template parameter node. */ #define TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_TYPE_PARM \ || TREE_CODE (NODE) == TEMPLATE_TEMPLATE_PARM \ || TREE_CODE (NODE) == TEMPLATE_PARM_INDEX) /* Mark NODE as a template parameter. */ #define SET_DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) = 1) /* Nonzero if NODE is a template template parameter. */ #define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE)) /* Nonzero for a DECL that represents a function template. */ #define DECL_FUNCTION_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL) /* Nonzero for a DECL that represents a class template or alias template. */ #define DECL_TYPE_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL) /* Nonzero for a DECL that represents a class template. */ #define DECL_CLASS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a TEMPLATE_DECL that represents an alias template. */ #define DECL_ALIAS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a NODE which declares a type. */ #define DECL_DECLARES_TYPE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE)) /* Nonzero if NODE declares a function. 
*/ #define DECL_DECLARES_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE)) /* Nonzero if NODE is the typedef implicitly generated for a type when the type is declared. In C++, `struct S {};' is roughly equivalent to `struct S {}; typedef struct S S;' in C. DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this example. In C++, there is a second implicit typedef for each class, called the injected-class-name, in the scope of `S' itself, so that you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */ #define DECL_IMPLICIT_TYPEDEF_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE)) #define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \ (DECL_LANG_FLAG_2 (NODE) = 1) #define DECL_SELF_REFERENCE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE)) #define SET_DECL_SELF_REFERENCE_P(NODE) \ (DECL_LANG_FLAG_4 (NODE) = 1) /* A `primary' template is one that has its own template header and is not a partial specialization. A member function of a class template is a template, but not primary. A member template is primary. Friend templates are primary, too. */ /* Returns the primary template corresponding to these parameters. */ #define DECL_PRIMARY_TEMPLATE(NODE) \ (TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE))) /* Returns nonzero if NODE is a primary template. */ #define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE)) /* Nonzero iff NODE is a specialization of a template. The value indicates the type of specializations: 1=implicit instantiation 2=partial or explicit specialization, e.g.: template <> int min<int> (int, int), 3=explicit instantiation, e.g.: template int min<int> (int, int); Note that NODE will be marked as a specialization even if the template it is instantiating is not a primary template. For example, given: template <typename T> struct O { void f(); struct I {}; }; both O<int>::f and O<int>::I will be marked as instantiations. 
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also be non-NULL. */ #define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template) /* Like DECL_USE_TEMPLATE, but for class types. */ #define CLASSTYPE_USE_TEMPLATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->use_template) /* True if NODE is a specialization of a primary template. */ #define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \ (CLASS_TYPE_P (NODE) \ && CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) #define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1) #define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) & 1) #define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2) #define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2) /* Returns true for an explicit or partial specialization of a class template. */ #define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 2) #define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 2) #define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1) #define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1) #define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 1) #define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 1) #define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3) #define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3) #define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 3) #define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 3) /* Nonzero if DECL is a friend function which is an instantiation from the point of view of the compiler, but not from the point of view of the language. 
For example given: template <class T> struct S { friend void f(T) {}; }; the declaration of `void f(int)' generated when S<int> is instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */ #define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \ (DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \ && !DECL_USE_TEMPLATE (DECL)) /* Nonzero if DECL is a function generated from a function 'temploid', i.e. template, member of class template, or dependent friend. */ #define DECL_TEMPLOID_INSTANTIATION(DECL) \ (DECL_TEMPLATE_INSTANTIATION (DECL) \ || DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL)) /* Nonzero if DECL is either defined implicitly by the compiler or generated from a temploid. */ #define DECL_GENERATED_P(DECL) \ (DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL)) /* Nonzero iff we are currently processing a declaration for an entity with its own template parameter list, and which is not a full specialization. */ #define PROCESSING_REAL_TEMPLATE_DECL_P() \ (!processing_template_parmlist \ && processing_template_decl > template_class_depth (current_scope ())) /* Nonzero if this VAR_DECL or FUNCTION_DECL has already been instantiated, i.e. its definition has been generated from the pattern given in the template. */ #define DECL_TEMPLATE_INSTANTIATED(NODE) \ DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE)) /* We know what we're doing with this decl now. */ #define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE) /* DECL_EXTERNAL must be set on a decl until the decl is actually emitted, so that assemble_external will work properly. So we have this flag to tell us whether the decl is really not external. This flag does not indicate whether or not the decl is defined in the current translation unit; it indicates whether or not we should emit the decl at the end of compilation if it is defined and needed. 
*/ #define DECL_NOT_REALLY_EXTERN(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern) #define DECL_REALLY_EXTERN(NODE) \ (DECL_EXTERNAL (NODE) \ && (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE))) /* A thunk is a stub function. A thunk is an alternate entry point for an ordinary FUNCTION_DECL. The address of the ordinary FUNCTION_DECL is given by the DECL_INITIAL, which is always an ADDR_EXPR whose operand is a FUNCTION_DECL. The job of the thunk is to either adjust the this pointer before transferring control to the FUNCTION_DECL, or call FUNCTION_DECL and then adjust the result value. Note, the result pointer adjusting thunk must perform a call to the thunked function, (or be implemented via passing some invisible parameter to the thunked function, which is modified to perform the adjustment just before returning). A thunk may perform either, or both, of the following operations: o Adjust the this or result pointer by a constant offset. o Adjust the this or result pointer by looking up a vcall or vbase offset in the vtable. A this pointer adjusting thunk converts from a base to a derived class, and hence adds the offsets. A result pointer adjusting thunk converts from a derived class to a base, and hence subtracts the offsets. If both operations are performed, then the constant adjustment is performed first for this pointer adjustment and last for the result pointer adjustment. The constant adjustment is given by THUNK_FIXED_OFFSET. If the vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is used. For this pointer adjusting thunks, it is the vcall offset into the vtable. For result pointer adjusting thunks it is the binfo of the virtual base to convert to. Use that binfo's vbase offset. It is possible to have equivalent covariant thunks. These are distinct virtual covariant thunks whose vbase offsets happen to have the same value. 
THUNK_ALIAS is used to pick one as the canonical thunk, which will
   get all the this pointer adjusting thunks attached to it. */

/* An integer indicating how many bytes should be subtracted from the
   this or result pointer when this function is called.  */
#define THUNK_FIXED_OFFSET(DECL) \
  (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)

/* A tree indicating how to perform the virtual adjustment. For a this
   adjusting thunk it is the number of bytes to be added to the vtable
   to find the vcall offset. For a result adjusting thunk, it is the
   binfo of the relevant virtual base. If NULL, then there is no
   virtual adjust. (The vptr is always located at offset zero from
   the this or result pointer.) (If the covariant type is within the
   class hierarchy being laid out, the vbase index is not yet known
   at the point we need to create the thunks, hence the need to use
   binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
  (LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)

/* A thunk which is equivalent to another thunk.  */
#define THUNK_ALIAS(DECL) \
  (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)

/* For thunk NODE, this is the FUNCTION_DECL thunked to.  It is
   possible for the target to be a thunk too.  */
#define THUNK_TARGET(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->befriending_classes)

/* True for a SCOPE_REF iff the "template" keyword was used to
   indicate that the qualified name denotes a template.  */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
  (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))

/* True for an OMP_ATOMIC that has dependent parameters.  These are
   stored as an expr in operand 1, and integer_zero_node in operand 0. */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
  (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)

/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
  (TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))

/* A language-specific token attached to the OpenMP data clauses to
   hold code (or code fragments) related to ctors, dtors, and op=.
   See semantics.c for details.  */
#define CP_OMP_CLAUSE_INFO(NODE) \
  TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
                                     OMP_CLAUSE_LINEAR))

/* Nonzero if this transaction expression's body contains statements.  */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
  TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))

/* These macros provide convenient access to the various _STMT nodes
   created when parsing template declarations.  */
#define TRY_STMTS(NODE)         TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE)      TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)

#define EH_SPEC_STMTS(NODE)     TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE)    TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)

#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)

/* Nonzero if this try block is a function try block.  */
#define FN_TRY_BLOCK_P(NODE)    TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))

#define HANDLER_PARMS(NODE)     TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE)      TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE)      TREE_TYPE (HANDLER_CHECK (NODE))

/* CLEANUP_STMT accessors.  The statement(s) covered, the cleanup to run
   and the VAR_DECL for which this cleanup exists.  */
#define CLEANUP_BODY(NODE)      TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE)      TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE)      TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)

/* IF_STMT accessors. These give access to the condition of the if
   statement, the then block of the if statement, and the else block
   of the if statement if it exists. */
#define IF_COND(NODE)           TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE)       TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE)       TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE)          TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
#define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE))

/* Like PACK_EXPANSION_EXTRA_ARGS, for constexpr if.  IF_SCOPE is used while
   building an IF_STMT; IF_STMT_EXTRA_ARGS is used after it is complete.  */
#define IF_STMT_EXTRA_ARGS(NODE) IF_SCOPE (NODE)

/* WHILE_STMT accessors. These give access to the condition of the
   while statement and the body of the while statement, respectively.  */
#define WHILE_COND(NODE)        TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE)        TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)

/* DO_STMT accessors. These give access to the condition of the do
   statement and the body of the do statement, respectively.  */
#define DO_COND(NODE)           TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE)           TREE_OPERAND (DO_STMT_CHECK (NODE), 1)

/* FOR_STMT accessors. These give access to the init statement,
   condition, update expression, and body of the for statement,
   respectively.  */
#define FOR_INIT_STMT(NODE)     TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE)          TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE)          TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE)          TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE)         TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)

/* RANGE_FOR_STMT accessors. These give access to the declarator,
   expression, body, and scope of the statement, respectively.  */
#define RANGE_FOR_DECL(NODE)    TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE)    TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE)    TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE)   TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
#define RANGE_FOR_UNROLL(NODE)  TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4)
#define RANGE_FOR_IVDEP(NODE)   TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))

/* SWITCH_STMT accessors: condition, body, type, and scope.  */
#define SWITCH_STMT_COND(NODE)  TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE)  TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE)  TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)

/* True if there are case labels for all possible values of switch cond, either
   because there is a default: case label or because the case label ranges cover
   all values.  */
#define SWITCH_STMT_ALL_CASES_P(NODE) \
  TREE_LANG_FLAG_0 (SWITCH_STMT_CHECK (NODE))

/* True if the body of a switch stmt contains no BREAK_STMTs.  */
#define SWITCH_STMT_NO_BREAK_P(NODE) \
  TREE_LANG_FLAG_2 (SWITCH_STMT_CHECK (NODE))

/* STMT_EXPR accessor.  */
#define STMT_EXPR_STMT(NODE)    TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)

/* EXPR_STMT accessor. This gives the expression associated with an
   expression statement.  */
#define EXPR_STMT_EXPR(NODE)    TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)

/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
   discard it if it isn't useful.  */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
  TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))

/* True if this TARGET_EXPR is the result of list-initialization of a
   temporary.  */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
  TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))

/* True if this TARGET_EXPR expresses direct-initialization of an object
   to be named later.
 */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
  TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))

/* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if
   the initializer has void type, it's doing something more complicated.  */
#define SIMPLE_TARGET_EXPR_P(NODE)                              \
  (TREE_CODE (NODE) == TARGET_EXPR                              \
   && !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE))))

/* True if EXPR expresses direct-initialization of a TYPE.  */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR)                                   \
  (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR)           \
   && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))

/* True if this CONVERT_EXPR is for a conversion to virtual base in
   an NSDMI, and should be re-evaluated when used in a constructor.  */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
  TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))

/* True if SIZEOF_EXPR argument is type.  */
#define SIZEOF_EXPR_TYPE_P(NODE) \
  TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))

/* True if the ALIGNOF_EXPR was spelled "alignof".  */
#define ALIGNOF_EXPR_STD_P(NODE) \
  TREE_LANG_FLAG_0 (ALIGNOF_EXPR_CHECK (NODE))

/* An enumeration of the kind of tags that C++ accepts.  */
enum tag_types {
  none_type = 0, /* Not a tag type.  */
  record_type,   /* "struct" types.  */
  class_type,    /* "class" types.  */
  union_type,    /* "union" types.  */
  enum_type,     /* "enum" types.  */
  typename_type, /* "typename" types.  */
  scope_type     /* namespace or tagged type name followed by :: */
};

/* The various kinds of lvalues we distinguish.  */
enum cp_lvalue_kind_flags {
  clk_none = 0,     /* Things that are not an lvalue.  */
  clk_ordinary = 1, /* An ordinary lvalue.  */
  clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */
  clk_class = 4,    /* A prvalue of class or array type.  */
  clk_bitfield = 8, /* An lvalue for a bit-field.  */
  clk_packed = 16   /* An lvalue for a packed field.  */
};

/* This type is used for parameters and variables which hold
   combinations of the flags in enum cp_lvalue_kind_flags.  */
typedef int cp_lvalue_kind;

/* Various kinds of template specialization, instantiation, etc.  */
enum tmpl_spec_kind {
  tsk_none,                /* Not a template at all.  */
  tsk_invalid_member_spec, /* An explicit member template
                              specialization, but the enclosing
                              classes have not all been explicitly
                              specialized.  */
  tsk_invalid_expl_inst,   /* An explicit instantiation containing
                              template parameter lists.  */
  tsk_excessive_parms,     /* A template declaration with too many
                              template parameter lists.  */
  tsk_insufficient_parms,  /* A template declaration with too few
                              parameter lists.  */
  tsk_template,            /* A template declaration.  */
  tsk_expl_spec,           /* An explicit specialization.  */
  tsk_expl_inst            /* An explicit instantiation.  */
};

/* The various kinds of access.  BINFO_ACCESS depends on these being
   two bit quantities.  The numerical values are important; they are
   used to initialize RTTI data structures, so changing them changes
   the ABI.  */
enum access_kind {
  ak_none = 0,      /* Inaccessible.  */
  ak_public = 1,    /* Accessible, as a `public' thing.  */
  ak_protected = 2, /* Accessible, as a `protected' thing.  */
  ak_private = 3    /* Accessible, as a `private' thing.  */
};

/* The various kinds of special functions.  If you add to this list,
   you should update special_function_p as well.  */
enum special_function_kind {
  sfk_none = 0,            /* Not a special function.  This enumeral
                              must have value zero; see
                              special_function_p.  */
  sfk_constructor,         /* A constructor.  */
  sfk_copy_constructor,    /* A copy constructor.  */
  sfk_move_constructor,    /* A move constructor.  */
  sfk_copy_assignment,     /* A copy assignment operator.  */
  sfk_move_assignment,     /* A move assignment operator.  */
  sfk_destructor,          /* A destructor.  */
  sfk_complete_destructor, /* A destructor for complete objects.  */
  sfk_base_destructor,     /* A destructor for base subobjects.  */
  sfk_deleting_destructor, /* A destructor for complete objects that
                              deletes the object after it has been
                              destroyed.  */
  sfk_conversion,          /* A conversion operator.  */
  sfk_deduction_guide,     /* A class template deduction guide.  */
  sfk_inheriting_constructor /* An inheriting constructor */
};

/* The various kinds of linkage.  From [basic.link],

      A name is said to have linkage when it might denote the same
      object, reference, function, type, template, namespace or value
      as a name introduced in another scope:

      -- When a name has external linkage, the entity it denotes can
         be referred to from scopes of other translation units or from
         other scopes of the same translation unit.

      -- When a name has internal linkage, the entity it denotes can
         be referred to by names from other scopes in the same
         translation unit.

      -- When a name has no linkage, the entity it denotes cannot be
         referred to by names from other scopes.  */
enum linkage_kind {
  lk_none,      /* No linkage.  */
  lk_internal,  /* Internal linkage.  */
  lk_external   /* External linkage.  */
};

/* Storage duration kinds.  */
enum duration_kind {
  dk_static,
  dk_thread,
  dk_auto,
  dk_dynamic
};

/* Bitmask flags to control type substitution.  */
enum tsubst_flags {
  tf_none = 0,                   /* nothing special */
  tf_error = 1 << 0,             /* give error messages  */
  tf_warning = 1 << 1,           /* give warnings too  */
  tf_ignore_bad_quals = 1 << 2,  /* ignore bad cvr qualifiers */
  tf_keep_type_decl = 1 << 3,    /* retain typedef type decls
                                    (make_typename_type use) */
  tf_ptrmem_ok = 1 << 4,         /* pointers to member ok (internal
                                    instantiate_type use) */
  tf_user = 1 << 5,              /* found template must be a user template
                                    (lookup_template_class use) */
  tf_conv = 1 << 6,              /* We are determining what kind of
                                    conversion might be permissible,
                                    not actually performing the
                                    conversion.  */
  tf_decltype = 1 << 7,          /* We are the operand of decltype.
                                    Used to implement the special rules
                                    for calls in decltype (5.2.2/11).  */
  tf_partial = 1 << 8,           /* Doing initial explicit argument
                                    substitution in fn_type_unification.  */
  tf_fndecl_type = 1 << 9,       /* Substituting the type of a function
                                    declaration.  */
  tf_no_cleanup = 1 << 10,       /* Do not build a cleanup
                                    (build_target_expr and friends) */
  /* Convenient substitution flags combinations.  */
  tf_warning_or_error = tf_warning | tf_error
};

/* This type is used for parameters and variables which hold
   combinations of the flags in enum tsubst_flags.  */
typedef int tsubst_flags_t;

/* The kind of checking we can do looking in a class hierarchy.  */
enum base_access_flags {
  ba_any = 0,              /* Do not check access, allow an ambiguous base,
                              prefer a non-virtual base */
  ba_unique = 1 << 0,      /* Must be a unique base.  */
  ba_check_bit = 1 << 1,   /* Check access.  */
  ba_check = ba_unique | ba_check_bit,
  ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope.  */
};

/* This type is used for parameters and variables which hold
   combinations of the flags in enum base_access_flags.  */
typedef int base_access;

/* The various kinds of access check during parsing.  */
enum deferring_kind {
  dk_no_deferred = 0, /* Check access immediately */
  dk_deferred = 1,    /* Deferred check */
  dk_no_check = 2     /* No access check */
};

/* The kind of base we can find, looking in a class hierarchy.
   Values <0 indicate we failed.  */
enum base_kind {
  bk_inaccessible = -3, /* The base is inaccessible */
  bk_ambig = -2,        /* The base is ambiguous */
  bk_not_base = -1,     /* It is not a base */
  bk_same_type = 0,     /* It is the same type */
  bk_proper_base = 1,   /* It is a proper base */
  bk_via_virtual = 2    /* It is a proper base, but via a virtual
                           path. This might not be the canonical
                           binfo.  */
};

/* Node for "pointer to (virtual) function".
   This may be distinct from ptr_type_node so gdb can distinguish them.  */
#define vfunc_ptr_type_node  vtable_entry_type

/* For building calls to `delete'.  */
extern GTY(()) tree integer_two_node;

/* The number of function bodies which we are currently processing.
   (Zero if we are at namespace scope, one inside the body of a
   function, two inside the body of a function in a local class, etc.)
 */
extern int function_depth;

/* Nonzero if we are inside eq_specializations, which affects
   comparison of PARM_DECLs in cp_tree_equal.  */
extern int comparing_specializations;

/* In parser.c.  */

/* Nonzero if we are parsing an unevaluated operand: an operand to
   sizeof, typeof, or alignof.  This is a count since operands to
   sizeof can be nested.  */
extern int cp_unevaluated_operand;

/* RAII class used to inhibit the evaluation of operands during parsing
   and template instantiation.  Evaluation warnings are also inhibited.  */
struct cp_unevaluated
{
  cp_unevaluated ();
  ~cp_unevaluated ();
};

/* in pt.c  */

/* These values are used for the `STRICT' parameter to type_unification and
   fn_type_unification.  Their meanings are described with the
   documentation for fn_type_unification.  */
enum unification_kind_t {
  DEDUCE_CALL,
  DEDUCE_CONV,
  DEDUCE_EXACT
};

// An RAII class used to create a new pointer map for local
// specializations. When the stack goes out of scope, the
// previous pointer map is restored.
enum lss_policy { lss_blank, lss_copy };

struct local_specialization_stack
{
  local_specialization_stack (lss_policy = lss_blank);
  ~local_specialization_stack ();

  /* The pointer map that was in effect before this stack was pushed.  */
  hash_map<tree, tree> *saved;
};

/* in class.c */

extern int current_class_depth;

/* An array of all local classes present in this translation unit, in
   declaration order.  */
extern GTY(()) vec<tree, va_gc> *local_classes;

/* in decl.c */

/* An array of static vars & fns.  */
extern GTY(()) vec<tree, va_gc> *static_decls;

/* An array of vtable-needing types that have no key function, or have
   an emitted key function.  */
extern GTY(()) vec<tree, va_gc> *keyed_classes;

/* Here's where we control how name mangling takes place.  */

/* Cannot use '$' up front, because this confuses gdb
   (names beginning with '$' are gdb-local identifiers).

   Note that all forms in which the '$' is significant are long enough
   for direct indexing (meaning that if we know there is a '$'
   at a particular location, we can index into the string at
   any other location that provides distinguishing characters).  */

/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
   doesn't allow '.' in symbol names.  */
#ifndef NO_DOT_IN_LABEL

#define JOINER '.'

#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"

#else /* NO_DOT_IN_LABEL */

#ifndef NO_DOLLAR_IN_LABEL

#define JOINER '$'

#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"

#else /* NO_DOLLAR_IN_LABEL */

/* Neither '.' nor '$' is available; fall back to '__'-prefixed names,
   which requires string-prefix tests instead of single-char joins.  */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
             sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
             sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
             sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"

#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */

#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"

#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), \
             UDLIT_OP_ANSI_PREFIX, \
             sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
  (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)

#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)

/* When a single-char JOINER is in use, these predicates can test
   individual characters rather than string prefixes.  */
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
  && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
  && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)

#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))

#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */

/* Nonzero if we're done parsing and into end-of-file activities.
   Two if we're done with front-end processing.  */
extern int at_eof;

/* True if note_mangling_alias should enqueue mangling aliases for
   later generation, rather than emitting them right away.  */
extern bool defer_mangling_aliases;

/* True if noexcept is part of the type (i.e. in C++17).  */
extern bool flag_noexcept_type;

/* A list of namespace-scope objects which have constructors or
   destructors which reside in the global scope.  The decl is stored
   in the TREE_VALUE slot and the initializer is stored in the
   TREE_PURPOSE slot.  */
extern GTY(()) tree static_aggregates;

/* Likewise, for thread local storage.  */
extern GTY(()) tree tls_aggregates;

enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };

/* These are used as bits in flags passed to various functions to
   control their behavior.  Despite the LOOKUP_ prefix, many of these
   do not control name lookup.  ??? Functions using these flags should
   probably be modified to accept explicit boolean flags for the
   behaviors relevant to them.  */

/* Check for access violations.  */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
   should be called directly.  */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried.  This flag
   indicates that we are not performing direct-initialization.
 */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
   as long as the current variable bindings; otherwise it only lives
   until the end of the complete-expression.  It also forces
   direct-initialization in cases where other parts of the compiler
   have already generated a temporary, such as reference
   initialization and the catch parameter.  */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
   conversions are not permitted (only built-in conversions).  */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor.  (Therefore, we do
   not need to check that the object is non-NULL before calling the
   destructor.)  */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries.  */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces.  */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types.  */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces.  */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
   (Normally, these entities are registered in the symbol table, but
   not found by lookup.)  */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* We're trying to treat an lvalue as an rvalue.  */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed.  */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization.  */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor.  */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors.  */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
   Used by synthesized_method_walk to determine which functions will
   be called to initialize subobjects, in order to determine exception
   specification and possible implicit delete.
   This is kind of a hack, but exiting early avoids problems with trying
   to perform argument conversions when the class isn't complete yet.  */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
   cycles trying to declare them (core issue 1092).  */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
   digest_init.  */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments.  */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues.  */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
/* Used by case_conversion to disregard non-integral conversions.  */
#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
/* Used for delegating constructors in order to diagnose self-delegation.  */
#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)

#define LOOKUP_NAMESPACES_ONLY(F)  \
  (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F)  \
  (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F)     ((F) & LOOKUP_PREFER_BOTH)

/* These flags are used by the conversion code.
   CONV_IMPLICIT   :  Perform implicit conversions (standard and user-defined).
   CONV_STATIC     :  Perform the explicit conversions for static_cast.
   CONV_CONST      :  Perform the explicit conversions for const_cast.
   CONV_REINTERPRET:  Perform the explicit conversions for reinterpret_cast.
   CONV_PRIVATE    :  Perform upcasts to private bases.
   CONV_FORCE_TEMP :  Require a new temporary when converting to the same
                      aggregate type.  */
#define CONV_IMPLICIT    1
#define CONV_STATIC      2
#define CONV_CONST       4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE     16
/* #define CONV_NONCONVERTING 32 */  /* Retired bit; value kept reserved.  */
#define CONV_FORCE_TEMP  64
#define CONV_FOLD        128
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
                          | CONV_REINTERPRET)
#define CONV_C_CAST      (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
                          | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
#define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD)

/* Used by build_expr_type_conversion to indicate which types are
   acceptable as arguments to the expression under consideration.  */
#define WANT_INT        1 /* integer types, including bool */
#define WANT_FLOAT      2 /* floating point types */
#define WANT_ENUM       4 /* enumerated types */
#define WANT_POINTER    8 /* pointer types */
#define WANT_NULL      16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH      (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)

/* Used with comptypes, and related functions, to guide type
   comparison.  */
#define COMPARE_STRICT        0 /* Just check if the types are the
                                   same.  */
#define COMPARE_BASE          1 /* Check to see if the second type is
                                   derived from the first.  */
#define COMPARE_DERIVED       2 /* Like COMPARE_BASE, but in
                                   reverse.  */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
                                   another declaration of an existing
                                   entity is seen.  */
#define COMPARE_STRUCTURAL    8 /* The comparison is intended to be
                                   structural.  The actual comparison
                                   will be identical to
                                   COMPARE_STRICT.  */

/* Used with start function.  */
#define SF_DEFAULT        0 /* No flags.  */
#define SF_PRE_PARSED     1 /* The function declaration has
                               already been parsed.  */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
                               in the class body.  */

/* Used with start_decl's initialized parameter.  */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED   1
#define SD_DEFAULTED     2
#define SD_DELETED       3

/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
   is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
   class derived from the type pointed to (referred to) by TYPE1.  */
#define same_or_base_type_p(TYPE1, TYPE2) \
  comptypes ((TYPE1), (TYPE2), COMPARE_BASE)

/* These macros are used to access a TEMPLATE_PARM_INDEX.  */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
  ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
  (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))

/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
   TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes.  */
#define TEMPLATE_TYPE_PARM_INDEX(NODE)                                  \
  (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM,            \
                                 TEMPLATE_TEMPLATE_PARM,                \
                                 BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
  (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
  (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
  (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
  (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
  (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))

/* For a C++17 class deduction placeholder, the template it represents.
 */
#define CLASS_PLACEHOLDER_TEMPLATE(NODE) \
  (DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE))))

/* Contexts in which auto deduction occurs. These flags are
   used to control diagnostics in do_auto_deduction.  */
enum auto_deduction_context
{
  adc_unspecified,   /* Not given */
  adc_variable_type, /* Variable initializer deduction */
  adc_return_type,   /* Return type deduction */
  adc_unify,         /* Template argument deduction */
  adc_requirement,   /* Argument deduction constraint */
  adc_decomp_type    /* Decomposition declaration initializer deduction */
};

/* True if this type-parameter belongs to a class template, used by C++17
   class template argument deduction.  */
#define TEMPLATE_TYPE_PARM_FOR_CLASS(NODE) \
  (TREE_LANG_FLAG_0 (TEMPLATE_TYPE_PARM_CHECK (NODE)))

/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto).  */
#define AUTO_IS_DECLTYPE(NODE) \
  (TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))

/* These constants can be used as bit flags in the process of tree formatting.

   TFF_PLAIN_IDENTIFIER: unqualified part of a name.
   TFF_SCOPE: include the class and namespace scope of the name.
   TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
   TFF_DECL_SPECIFIERS: print decl-specifiers.
   TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
       a class-key (resp. `enum').
   TFF_RETURN_TYPE: include function return type.
   TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
   TFF_EXCEPTION_SPECIFICATION: show function exception specification.
   TFF_TEMPLATE_HEADER: show the template<...> header in a
       template-declaration.
   TFF_TEMPLATE_NAME: show only template-name.
   TFF_EXPR_IN_PARENS: parenthesize expressions.
   TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
   TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
       top-level entity.
   TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
       identical to their defaults.
   TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
       arguments for a function template specialization.
   TFF_POINTER: we are printing a pointer type.  */
#define TFF_PLAIN_IDENTIFIER                    (0)
#define TFF_SCOPE                               (1)
#define TFF_CHASE_TYPEDEF                       (1 << 1)
#define TFF_DECL_SPECIFIERS                     (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM                   (1 << 3)
#define TFF_RETURN_TYPE                         (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS          (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION             (1 << 6)
#define TFF_TEMPLATE_HEADER                     (1 << 7)
#define TFF_TEMPLATE_NAME                       (1 << 8)
#define TFF_EXPR_IN_PARENS                      (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS               (1 << 10)
#define TFF_UNQUALIFIED_NAME                    (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS  (1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS                (1 << 13)
#define TFF_POINTER                             (1 << 14)

/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
   node.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE)      \
  ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM)   \
   ? TYPE_TI_TEMPLATE (NODE)                            \
   : TYPE_NAME (NODE))

/* in lex.c  */

extern void init_reswords (void);

/* Various flags for the overloaded operator information.  */
enum ovl_op_flags {
  OVL_OP_FLAG_NONE = 0,    /* Don't care.  */
  OVL_OP_FLAG_UNARY = 1,   /* Is unary.  */
  OVL_OP_FLAG_BINARY = 2,  /* Is binary.  */
  OVL_OP_FLAG_AMBIARY = 3, /* May be unary or binary.  */
  OVL_OP_FLAG_ALLOC = 4,   /* operator new or delete.  */
  OVL_OP_FLAG_DELETE = 1,  /* operator delete.  */
  OVL_OP_FLAG_VEC = 2      /* vector new or delete.  */
};

/* Compressed operator codes.  Order is determined by operators.def
   and does not match that of tree_codes.  */
enum ovl_op_code {
  OVL_OP_ERROR_MARK,
  OVL_OP_NOP_EXPR,
#define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) OVL_OP_##CODE,
#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) /* NOTHING */
#include "operators.def"
  OVL_OP_MAX
};

/* Per-operator record for the overloadable-operator tables below.  */
struct GTY(()) ovl_op_info_t {
  /* The IDENTIFIER_NODE for the operator.  */
  tree identifier;
  /* The name of the operator.  */
  const char *name;
  /* The mangled name of the operator.  */
  const char *mangled_name;
  /* The (regular) tree code.  */
  enum tree_code tree_code : 16;
  /* The (compressed) operator code.  */
  enum ovl_op_code ovl_op_code : 8;
  /* The ovl_op_flags of the operator.  */
  unsigned flags : 8;
};

/* Overloaded operator info indexed by ass_op_p & ovl_op_code.  */
extern GTY(()) ovl_op_info_t ovl_op_info[2][OVL_OP_MAX];
/* Mapping from tree_codes to ovl_op_codes.  */
extern GTY(()) unsigned char ovl_op_mapping[MAX_TREE_CODES];
/* Mapping for ambi-ary operators from the binary to the unary.  */
extern GTY(()) unsigned char ovl_op_alternate[OVL_OP_MAX];

/* Given an ass_op_p boolean and a tree code, return a pointer to its
   overloaded operator info.  Tree codes for non-overloaded operators
   map to the error-operator.  */
#define OVL_OP_INFO(IS_ASS_P, TREE_CODE)                        \
  (&ovl_op_info[(IS_ASS_P) != 0][ovl_op_mapping[(TREE_CODE)]])
/* Overloaded operator info for an identifier for which
   IDENTIFIER_OVL_OP_P is true.  */
#define IDENTIFIER_OVL_OP_INFO(NODE) \
  (&ovl_op_info[IDENTIFIER_KIND_BIT_0 (NODE)][IDENTIFIER_CP_INDEX (NODE)])
#define IDENTIFIER_OVL_OP_FLAGS(NODE) \
  (IDENTIFIER_OVL_OP_INFO (NODE)->flags)

/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
   constants.  */
typedef int cp_cv_quals;

/* Non-static member functions have an optional virt-specifier-seq.
   There is a VIRT_SPEC value for each virt-specifier.
   They can be combined by bitwise-or to form the complete set of
   virt-specifiers for a member function.  */
enum virt_specifier
{
  VIRT_SPEC_UNSPECIFIED = 0x0,
  VIRT_SPEC_FINAL       = 0x1,
  VIRT_SPEC_OVERRIDE    = 0x2
};

/* A type-qualifier, or bitmask thereof, using the VIRT_SPEC
   constants.  */
typedef int cp_virt_specifiers;

/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:

   [dcl.fct]
   The return type, the parameter-type-list, the ref-qualifier, and
   the cv-qualifier-seq, but not the default arguments or the
   exception specification, are part of the function type.
REF_QUAL_NONE Ordinary member function with no ref-qualifier REF_QUAL_LVALUE Member function with the &-ref-qualifier REF_QUAL_RVALUE Member function with the &&-ref-qualifier */ enum cp_ref_qualifier { REF_QUAL_NONE = 0, REF_QUAL_LVALUE = 1, REF_QUAL_RVALUE = 2 }; /* A storage class. */ enum cp_storage_class { /* sc_none must be zero so that zeroing a cp_decl_specifier_seq sets the storage_class field to sc_none. */ sc_none = 0, sc_auto, sc_register, sc_static, sc_extern, sc_mutable }; /* An individual decl-specifier. This is used to index the array of locations for the declspecs in struct cp_decl_specifier_seq below. */ enum cp_decl_spec { ds_first, ds_signed = ds_first, ds_unsigned, ds_short, ds_long, ds_const, ds_volatile, ds_restrict, ds_inline, ds_virtual, ds_explicit, ds_friend, ds_typedef, ds_alias, ds_constexpr, ds_complex, ds_thread, ds_type_spec, ds_redefined_builtin_type_spec, ds_attribute, ds_std_attribute, ds_storage_class, ds_long_long, ds_concept, ds_last /* This enumerator must always be the last one. */ }; /* A decl-specifier-seq. */ struct cp_decl_specifier_seq { /* An array of locations for the declaration sepecifiers, indexed by enum cp_decl_spec_word. */ source_location locations[ds_last]; /* The primary type, if any, given by the decl-specifier-seq. Modifiers, like "short", "const", and "unsigned" are not reflected here. This field will be a TYPE, unless a typedef-name was used, in which case it will be a TYPE_DECL. */ tree type; /* The attributes, if any, provided with the specifier sequence. */ tree attributes; /* The c++11 attributes that follows the type specifier. */ tree std_attributes; /* If non-NULL, a built-in type that the user attempted to redefine to some other type. */ tree redefined_builtin_type; /* The storage class specified -- or sc_none if no storage class was explicitly specified. */ cp_storage_class storage_class; /* For the __intN declspec, this stores the index into the int_n_* arrays. 
*/ int int_n_idx; /* True iff TYPE_SPEC defines a class or enum. */ BOOL_BITFIELD type_definition_p : 1; /* True iff multiple types were (erroneously) specified for this decl-specifier-seq. */ BOOL_BITFIELD multiple_types_p : 1; /* True iff multiple storage classes were (erroneously) specified for this decl-specifier-seq or a combination of a storage class with a typedef specifier. */ BOOL_BITFIELD conflicting_specifiers_p : 1; /* True iff at least one decl-specifier was found. */ BOOL_BITFIELD any_specifiers_p : 1; /* True iff at least one type-specifier was found. */ BOOL_BITFIELD any_type_specifiers_p : 1; /* True iff "int" was explicitly provided. */ BOOL_BITFIELD explicit_int_p : 1; /* True iff "__intN" was explicitly provided. */ BOOL_BITFIELD explicit_intN_p : 1; /* True iff "char" was explicitly provided. */ BOOL_BITFIELD explicit_char_p : 1; /* True iff ds_thread is set for __thread, not thread_local. */ BOOL_BITFIELD gnu_thread_keyword_p : 1; /* True iff the type is a decltype. */ BOOL_BITFIELD decltype_p : 1; }; /* The various kinds of declarators. */ enum cp_declarator_kind { cdk_id, cdk_function, cdk_array, cdk_pointer, cdk_reference, cdk_ptrmem, cdk_decomp, cdk_error }; /* A declarator. */ typedef struct cp_declarator cp_declarator; typedef struct cp_parameter_declarator cp_parameter_declarator; /* A parameter, before it has been semantically analyzed. */ struct cp_parameter_declarator { /* The next parameter, or NULL_TREE if none. */ cp_parameter_declarator *next; /* The decl-specifiers-seq for the parameter. */ cp_decl_specifier_seq decl_specifiers; /* The declarator for the parameter. */ cp_declarator *declarator; /* The default-argument expression, or NULL_TREE, if none. */ tree default_argument; /* True iff this is a template parameter pack. */ bool template_parameter_pack_p; /* Location within source. */ location_t loc; }; /* A declarator. */ struct cp_declarator { /* The kind of declarator. 
*/ ENUM_BITFIELD (cp_declarator_kind) kind : 4; /* Whether we parsed an ellipsis (`...') just before the declarator, to indicate this is a parameter pack. */ BOOL_BITFIELD parameter_pack_p : 1; /* If this declarator is parenthesized, this the open-paren. It is UNKNOWN_LOCATION when not parenthesized. */ location_t parenthesized; location_t id_loc; /* Currently only set for cdk_id, cdk_decomp and cdk_function. */ /* GNU Attributes that apply to this declarator. If the declarator is a pointer or a reference, these attribute apply to the type pointed to. */ tree attributes; /* Standard C++11 attributes that apply to this declarator. If the declarator is a pointer or a reference, these attributes apply to the pointer, rather than to the type pointed to. */ tree std_attributes; /* For all but cdk_id, cdk_decomp and cdk_error, the contained declarator. For cdk_id, cdk_decomp and cdk_error, guaranteed to be NULL. */ cp_declarator *declarator; union { /* For identifiers. */ struct { /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or *_TYPE) for this identifier. */ tree qualifying_scope; /* The unqualified name of the entity -- an IDENTIFIER_NODE, BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */ tree unqualified_name; /* If this is the name of a function, what kind of special function (if any). */ special_function_kind sfk; } id; /* For functions. */ struct { /* The parameters to the function as a TREE_LIST of decl/default. */ tree parameters; /* The cv-qualifiers for the function. */ cp_cv_quals qualifiers; /* The virt-specifiers for the function. */ cp_virt_specifiers virt_specifiers; /* The ref-qualifier for the function. */ cp_ref_qualifier ref_qualifier; /* The transaction-safety qualifier for the function. */ tree tx_qualifier; /* The exception-specification for the function. */ tree exception_specification; /* The late-specified return type, if any. */ tree late_return_type; /* The trailing requires-clause, if any. */ tree requires_clause; } function; /* For arrays. 
*/ struct { /* The bounds to the array. */ tree bounds; } array; /* For cdk_pointer and cdk_ptrmem. */ struct { /* The cv-qualifiers for the pointer. */ cp_cv_quals qualifiers; /* For cdk_ptrmem, the class type containing the member. */ tree class_type; } pointer; /* For cdk_reference */ struct { /* The cv-qualifiers for the reference. These qualifiers are only used to diagnose ill-formed code. */ cp_cv_quals qualifiers; /* Whether this is an rvalue reference */ bool rvalue_ref; } reference; } u; }; /* A level of template instantiation. */ struct GTY((chain_next ("%h.next"))) tinst_level { /* The immediately deeper level in the chain. */ struct tinst_level *next; /* The original node. TLDCL can be a DECL (for a function or static data member), a TYPE (for a class), depending on what we were asked to instantiate, or a TREE_LIST with the template as PURPOSE and the template args as VALUE, if we are substituting for overload resolution. In all these cases, TARGS is NULL. However, to avoid creating TREE_LIST objects for substitutions if we can help, we store PURPOSE and VALUE in TLDCL and TARGS, respectively. So TLDCL stands for TREE_LIST or DECL (the template is a DECL too), whereas TARGS stands for the template arguments. */ tree tldcl, targs; private: /* Return TRUE iff the original node is a split list. */ bool split_list_p () const { return targs; } /* Return TRUE iff the original node is a TREE_LIST object. */ bool tree_list_p () const { return !split_list_p () && TREE_CODE (tldcl) == TREE_LIST; } /* Return TRUE iff the original node is not a list, split or not. */ bool not_list_p () const { return !split_list_p () && !tree_list_p (); } /* Convert (in place) the original node from a split list to a TREE_LIST. */ tree to_list (); public: /* Release storage for OBJ and node, if it's a TREE_LIST. */ static void free (tinst_level *obj); /* Return TRUE iff the original node is a list, split or not. 
*/ bool list_p () const { return !not_list_p (); } /* Return the original node; if it's a split list, make it a TREE_LIST first, so that it can be returned as a single tree object. */ tree get_node () { if (!split_list_p ()) return tldcl; else return to_list (); } /* Return the original node if it's a DECL or a TREE_LIST, but do NOT convert a split list to a TREE_LIST: return NULL instead. */ tree maybe_get_node () const { if (!split_list_p ()) return tldcl; else return NULL_TREE; } /* The location where the template is instantiated. */ location_t locus; /* errorcount + sorrycount when we pushed this level. */ unsigned short errors; /* Count references to this object. If refcount reaches refcount_infinity value, we don't increment or decrement the refcount anymore, as the refcount isn't accurate anymore. The object can be still garbage collected if unreferenced from anywhere, which might keep referenced objects referenced longer than otherwise necessary. Hitting the infinity is rare though. */ unsigned short refcount; /* Infinity value for the above refcount. */ static const unsigned short refcount_infinity = (unsigned short) ~0; }; bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec); /* Return the type of the `this' parameter of FNTYPE. */ inline tree type_of_this_parm (const_tree fntype) { function_args_iterator iter; gcc_assert (TREE_CODE (fntype) == METHOD_TYPE); function_args_iter_init (&iter, fntype); return function_args_iter_cond (&iter); } /* Return the class of the `this' parameter of FNTYPE. */ inline tree class_of_this_parm (const_tree fntype) { return TREE_TYPE (type_of_this_parm (fntype)); } /* True iff T is a variable template declaration. */ inline bool variable_template_p (tree t) { if (TREE_CODE (t) != TEMPLATE_DECL) return false; if (!PRIMARY_TEMPLATE_P (t)) return false; if (tree r = DECL_TEMPLATE_RESULT (t)) return VAR_P (r); return false; } /* True iff T is a variable concept definition. 
That is, T is a variable template declared with the concept specifier. */ inline bool variable_concept_p (tree t) { if (TREE_CODE (t) != TEMPLATE_DECL) return false; if (tree r = DECL_TEMPLATE_RESULT (t)) return VAR_P (r) && DECL_DECLARED_CONCEPT_P (r); return false; } /* True iff T is a concept definition. That is, T is a variable or function template declared with the concept specifier. */ inline bool concept_template_p (tree t) { if (TREE_CODE (t) != TEMPLATE_DECL) return false; if (tree r = DECL_TEMPLATE_RESULT (t)) return VAR_OR_FUNCTION_DECL_P (r) && DECL_DECLARED_CONCEPT_P (r); return false; } /* A parameter list indicating for a function with no parameters, e.g "int f(void)". */ extern cp_parameter_declarator *no_parameters; /* Various dump ids. */ extern int class_dump_id; extern int raw_dump_id; /* in call.c */ extern bool check_dtor_name (tree, tree); int magic_varargs_p (tree); extern tree build_conditional_expr (location_t, tree, tree, tree, tsubst_flags_t); extern tree build_addr_func (tree, tsubst_flags_t); extern void set_flags_from_callee (tree); extern tree build_call_a (tree, int, tree*); extern tree build_call_n (tree, int, ...); extern bool null_ptr_cst_p (tree); extern bool null_member_pointer_value_p (tree); extern bool sufficient_parms_p (const_tree); extern tree type_decays_to (tree); extern tree extract_call_expr (tree); extern tree build_user_type_conversion (tree, tree, int, tsubst_flags_t); extern tree build_new_function_call (tree, vec<tree, va_gc> **, tsubst_flags_t); extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *, tree *, tree, tree, tree *, tsubst_flags_t); extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **, tree, int, tree *, tsubst_flags_t); extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **, tree, int, tsubst_flags_t); extern tree build_new_op (location_t, enum tree_code, int, tree, tree, tree, tree *, tsubst_flags_t); extern tree build_op_call (tree, vec<tree, va_gc> 
**, tsubst_flags_t); extern bool aligned_allocation_fn_p (tree); extern bool usual_deallocation_fn_p (tree); extern tree build_op_delete_call (enum tree_code, tree, tree, bool, tree, tree, tsubst_flags_t); extern bool can_convert (tree, tree, tsubst_flags_t); extern bool can_convert_standard (tree, tree, tsubst_flags_t); extern bool can_convert_arg (tree, tree, tree, int, tsubst_flags_t); extern bool can_convert_arg_bad (tree, tree, tree, int, tsubst_flags_t); extern location_t get_fndecl_argument_location (tree, int); /* A class for recording information about access failures (e.g. private fields), so that we can potentially supply a fix-it hint about an accessor (from a context in which the constness of the object is known). */ class access_failure_info { public: access_failure_info () : m_was_inaccessible (false), m_basetype_path (NULL_TREE), m_field_decl (NULL_TREE) {} void record_access_failure (tree basetype_path, tree field_decl); void maybe_suggest_accessor (bool const_p) const; private: bool m_was_inaccessible; tree m_basetype_path; tree m_field_decl; }; extern bool enforce_access (tree, tree, tree, tsubst_flags_t, access_failure_info *afi = NULL); extern void push_defarg_context (tree); extern void pop_defarg_context (void); extern tree convert_default_arg (tree, tree, tree, int, tsubst_flags_t); extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t); extern tree build_x_va_arg (source_location, tree, tree); extern tree cxx_type_promotes_to (tree); extern tree type_passed_as (tree); extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t); extern bool is_properly_derived_from (tree, tree); extern tree initialize_reference (tree, tree, int, tsubst_flags_t); extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**); extern tree make_temporary_var_for_ref_to_temp (tree, tree); extern bool type_has_extended_temps (tree); extern tree strip_top_quals (tree); extern bool reference_related_p (tree, tree); extern int remaining_arguments 
(tree); extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t); extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int); extern tree build_converted_constant_expr (tree, tree, tsubst_flags_t); extern tree perform_direct_initialization_if_possible (tree, tree, bool, tsubst_flags_t); extern tree in_charge_arg_for_name (tree); extern tree build_cxx_call (tree, int, tree *, tsubst_flags_t); extern bool is_std_init_list (tree); extern bool is_list_ctor (tree); extern void validate_conversion_obstack (void); extern void mark_versions_used (tree); extern tree get_function_version_dispatcher (tree); /* in class.c */ extern tree build_vfield_ref (tree, tree); extern tree build_if_in_charge (tree true_stmt, tree false_stmt = void_node); extern tree build_base_path (enum tree_code, tree, tree, int, tsubst_flags_t); extern tree convert_to_base (tree, tree, bool, bool, tsubst_flags_t); extern tree convert_to_base_statically (tree, tree); extern tree build_vtbl_ref (tree, tree); extern tree build_vfn_ref (tree, tree); extern tree get_vtable_decl (tree, int); extern bool add_method (tree, tree, bool); extern tree declared_access (tree); extern tree currently_open_class (tree); extern tree currently_open_derived_class (tree); extern tree outermost_open_class (void); extern tree current_nonlambda_class_type (void); extern tree finish_struct (tree, tree); extern void finish_struct_1 (tree); extern int resolves_to_fixed_type_p (tree, int *); extern void init_class_processing (void); extern int is_empty_class (tree); extern bool is_really_empty_class (tree); extern void pushclass (tree); extern void popclass (void); extern void push_nested_class (tree); extern void pop_nested_class (void); extern int current_lang_depth (void); extern void push_lang_context (tree); extern void pop_lang_context (void); extern tree instantiate_type (tree, tree, tsubst_flags_t); extern void build_self_reference (void); extern int same_signature_p (const_tree, 
const_tree); extern void maybe_add_class_template_decl_list (tree, tree, int); extern void unreverse_member_declarations (tree); extern void invalidate_class_lookup_cache (void); extern void maybe_note_name_used_in_class (tree, tree); extern void note_name_declared_in_class (tree, tree); extern tree get_vtbl_decl_for_binfo (tree); extern bool vptr_via_virtual_p (tree); extern void debug_class (tree); extern void debug_thunks (tree); extern void set_linkage_according_to_type (tree, tree); extern void determine_key_method (tree); extern void check_for_override (tree, tree); extern void push_class_stack (void); extern void pop_class_stack (void); extern bool default_ctor_p (tree); extern bool type_has_user_nondefault_constructor (tree); extern tree in_class_defaulted_default_constructor (tree); extern bool user_provided_p (tree); extern bool type_has_user_provided_constructor (tree); extern bool type_has_non_user_provided_default_constructor (tree); extern bool vbase_has_user_provided_move_assign (tree); extern tree default_init_uninitialized_part (tree); extern bool trivial_default_constructor_is_constexpr (tree); extern bool type_has_constexpr_default_constructor (tree); extern bool type_has_virtual_destructor (tree); extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared); extern bool type_build_ctor_call (tree); extern bool type_build_dtor_call (tree); extern void explain_non_literal_class (tree); extern void inherit_targ_abi_tags (tree); extern void defaulted_late_check (tree); extern bool defaultable_fn_check (tree); extern void check_abi_tags (tree); extern tree missing_abi_tags (tree); extern void fixup_type_variants (tree); extern void fixup_attribute_variants (tree); extern tree* decl_cloned_function_p (const_tree, bool); extern void clone_function_decl (tree, bool); extern void adjust_clone_args (tree); extern void deduce_noexcept_on_destructor (tree); extern bool uniquely_derived_from_p (tree, tree); extern bool 
publicly_uniquely_derived_p (tree, tree); extern tree common_enclosing_class (tree, tree); /* in cvt.c */ extern tree convert_to_reference (tree, tree, int, int, tree, tsubst_flags_t); extern tree convert_from_reference (tree); extern tree force_rvalue (tree, tsubst_flags_t); extern tree ocp_convert (tree, tree, int, int, tsubst_flags_t); extern tree cp_convert (tree, tree, tsubst_flags_t); extern tree cp_convert_and_check (tree, tree, tsubst_flags_t); extern tree cp_fold_convert (tree, tree); extern tree cp_get_callee (tree); extern tree cp_get_callee_fndecl (tree); extern tree cp_get_callee_fndecl_nofold (tree); extern tree cp_get_fndecl_from_callee (tree, bool fold = true); extern tree convert_to_void (tree, impl_conv_void, tsubst_flags_t); extern tree convert_force (tree, tree, int, tsubst_flags_t); extern tree build_expr_type_conversion (int, tree, bool); extern tree type_promotes_to (tree); extern bool can_convert_qual (tree, tree); extern tree perform_qualification_conversions (tree, tree); extern bool tx_safe_fn_type_p (tree); extern tree tx_unsafe_fn_variant (tree); extern bool fnptr_conv_p (tree, tree); extern tree strip_fnptr_conv (tree); /* in name-lookup.c */ extern void maybe_push_cleanup_level (tree); extern tree make_anon_name (void); extern tree check_for_out_of_scope_variable (tree); extern void dump (cp_binding_level &ref); extern void dump (cp_binding_level *ptr); extern void print_other_binding_stack (cp_binding_level *); extern tree maybe_push_decl (tree); extern tree current_decl_namespace (void); /* decl.c */ extern tree poplevel (int, int, int); extern void cxx_init_decl_processing (void); enum cp_tree_node_structure_enum cp_tree_node_structure (union lang_tree_node *); extern void finish_scope (void); extern void push_switch (tree); extern void pop_switch (void); extern void note_break_stmt (void); extern bool note_iteration_stmt_body_start (void); extern void note_iteration_stmt_body_end (bool); extern tree make_lambda_name (void); extern 
int decls_match (tree, tree, bool = true); extern bool maybe_version_functions (tree, tree, bool); extern tree duplicate_decls (tree, tree, bool); extern tree declare_local_label (tree); extern tree define_label (location_t, tree); extern void check_goto (tree); extern bool check_omp_return (void); extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t); extern tree build_typename_type (tree, tree, tree, tag_types); extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t); extern tree build_library_fn_ptr (const char *, tree, int); extern tree build_cp_library_fn_ptr (const char *, tree, int); extern tree push_library_fn (tree, tree, tree, int); extern tree push_void_library_fn (tree, tree, int); extern tree push_throw_library_fn (tree, tree); extern void warn_misplaced_attr_for_class_type (source_location location, tree class_type); extern tree check_tag_decl (cp_decl_specifier_seq *, bool); extern tree shadow_tag (cp_decl_specifier_seq *); extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool); extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *); extern void start_decl_1 (tree, bool); extern bool check_array_initializer (tree, tree, tree); extern void cp_finish_decl (tree, tree, bool, tree, int); extern tree lookup_decomp_type (tree); extern void cp_maybe_mangle_decomp (tree, tree, unsigned int); extern void cp_finish_decomp (tree, tree, unsigned int); extern int cp_complete_array_type (tree *, tree, bool); extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t); extern tree build_ptrmemfunc_type (tree); extern tree build_ptrmem_type (tree, tree); /* the grokdeclarator prototype is in decl.h */ extern tree build_this_parm (tree, tree, cp_cv_quals); extern tree grokparms (tree, tree *); extern int copy_fn_p (const_tree); extern bool move_fn_p (const_tree); extern bool move_signature_fn_p (const_tree); extern tree 
get_scope_of_declarator (const cp_declarator *); extern void grok_special_member_properties (tree); extern bool grok_ctor_properties (const_tree, const_tree); extern bool grok_op_properties (tree, bool); extern tree xref_tag (enum tag_types, tree, tag_scope, bool); extern tree xref_tag_from_type (tree, tree, tag_scope); extern void xref_basetypes (tree, tree); extern tree start_enum (tree, tree, tree, tree, bool, bool *); extern void finish_enum_value_list (tree); extern void finish_enum (tree); extern void build_enumerator (tree, tree, tree, tree, location_t); extern tree lookup_enumerator (tree, tree); extern bool start_preparsed_function (tree, tree, int); extern bool start_function (cp_decl_specifier_seq *, const cp_declarator *, tree); extern tree begin_function_body (void); extern void finish_function_body (tree); extern tree outer_curly_brace_block (tree); extern tree finish_function (bool); extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree); extern void maybe_register_incomplete_var (tree); extern void maybe_commonize_var (tree); extern void complete_vars (tree); extern tree static_fn_type (tree); extern void revert_static_member_fn (tree); extern void fixup_anonymous_aggr (tree); extern tree compute_array_index_type (tree, tree, tsubst_flags_t); extern tree check_default_argument (tree, tree, tsubst_flags_t); extern int wrapup_namespace_globals (); extern tree create_implicit_typedef (tree, tree); extern int local_variable_p (const_tree); extern tree register_dtor_fn (tree); extern tmpl_spec_kind current_tmpl_spec_kind (int); extern tree cp_fname_init (const char *, tree *); extern tree cxx_builtin_function (tree decl); extern tree cxx_builtin_function_ext_scope (tree decl); extern tree check_elaborated_type_specifier (enum tag_types, tree, bool); extern void warn_extern_redeclared_static (tree, tree); extern tree cxx_comdat_group (tree); extern bool cp_missing_noreturn_ok_p (tree); extern bool is_direct_enum_init (tree, tree); 
/* Remaining decl.c entry points: initializer reshaping, deduced-type
   checks, and cleanup construction.  Prototypes only; definitions are
   in decl.c.  */
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern tree fndecl_declared_return_type (tree);
extern bool undeduced_auto_decl (tree);
/* NOTE: carries a default argument, so this header is C++-only.  */
extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
extern bool check_array_designated_initializer (constructor_elt *, unsigned HOST_WIDE_INT);
extern bool check_for_uninitialized_const_var (tree, bool, tsubst_flags_t);
/* in decl2.c -- mangling bookkeeping, member-function grokking, and
   attribute handling.  */
extern void record_mangling (tree, bool);
extern void overwrite_mangling (tree, tree);
extern void note_mangling_alias (tree, tree);
extern void generate_mangling_aliases (void);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree, enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *, tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *, tree, tree, tree);
extern bool any_dependent_type_attributes_p (tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern bool attributes_naming_typedef_ok (tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cxx_post_compilation_parsing_cleanups (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree); extern void determine_visibility (tree); extern void constrain_class_visibility (tree); extern void reset_type_linkage (tree); extern void tentative_decl_linkage (tree); extern void import_export_decl (tree); extern tree build_cleanup (tree); extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **, tsubst_flags_t); extern bool decl_defined_p (tree); extern bool decl_constant_var_p (tree); extern bool decl_maybe_constant_var_p (tree); extern void no_linkage_error (tree); extern void check_default_args (tree); extern bool mark_used (tree); extern bool mark_used (tree, tsubst_flags_t); extern void finish_static_data_member_decl (tree, tree, bool, tree, int); extern tree cp_build_parm_decl (tree, tree, tree); extern tree get_guard (tree); extern tree get_guard_cond (tree, bool); extern tree set_guard (tree); extern tree get_tls_wrapper_fn (tree); extern void mark_needed (tree); extern bool decl_needed_p (tree); extern void note_vague_linkage_fn (tree); extern void note_variable_template_instantiation (tree); extern tree build_artificial_parm (tree, tree, tree); extern bool possibly_inlined_p (tree); extern int parm_index (tree); extern tree vtv_start_verification_constructor_init_function (void); extern tree vtv_finish_verification_constructor_init_function (tree); extern bool cp_omp_mappable_type (tree); /* in error.c */ extern const char *type_as_string (tree, int); extern const char *type_as_string_translate (tree, int); extern const char *decl_as_string (tree, int); extern const char *decl_as_string_translate (tree, int); extern const char *decl_as_dwarf_string (tree, int); extern const char *expr_as_string (tree, int); extern const char *lang_decl_name (tree, int, bool); extern const char *lang_decl_dwarf_name (tree, int, bool); extern const char *language_to_string (enum languages); extern const char *class_key_or_enum_as_string (tree); extern void maybe_warn_variadic_templates (void); extern void maybe_warn_cpp0x 
(cpp0x_warn_str str); extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern location_t location_of (tree); extern void qualified_name_lookup_error (tree, tree, tree, location_t); /* in except.c */ extern void init_exception_processing (void); extern tree expand_start_catch_block (tree); extern void expand_end_catch_block (void); extern tree build_exc_ptr (void); extern tree build_throw (tree); extern int nothrow_libfn_p (const_tree); extern void check_handlers (tree); extern tree finish_noexcept_expr (tree, tsubst_flags_t); extern bool expr_noexcept_p (tree, tsubst_flags_t); extern void perform_deferred_noexcept_checks (void); extern bool nothrow_spec_p (const_tree); extern bool type_noexcept_p (const_tree); extern bool type_throw_all_p (const_tree); extern tree build_noexcept_spec (tree, int); extern void choose_personality_routine (enum languages); extern tree build_must_not_throw_expr (tree,tree); extern tree eh_type_info (tree); extern tree begin_eh_spec_block (void); extern void finish_eh_spec_block (tree, tree); extern tree build_eh_type_type (tree); extern tree cp_protect_cleanup_actions (void); extern tree create_try_catch_expr (tree, tree); /* in expr.c */ extern tree cplus_expand_constant (tree); extern tree mark_use (tree expr, bool rvalue_p, bool read_p, location_t = UNKNOWN_LOCATION, bool reject_builtin = true); extern tree mark_rvalue_use (tree, location_t = UNKNOWN_LOCATION, bool reject_builtin = true); extern tree mark_lvalue_use (tree); extern tree mark_lvalue_use_nonread (tree); extern tree mark_type_use (tree); extern tree mark_discarded_use (tree); extern void mark_exp_read (tree); /* friend.c */ extern int is_friend (tree, tree); extern void make_friend_class (tree, tree, bool); extern void add_friend (tree, tree, bool); extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool); extern void set_global_friend (tree); extern bool is_global_friend (tree); /* in init.c */ extern tree 
expand_member_init (tree); extern void emit_mem_initializers (tree); extern tree build_aggr_init (tree, tree, int, tsubst_flags_t); extern int is_class_type (tree, int); extern tree get_type_value (tree); extern tree build_zero_init (tree, tree, bool); extern tree build_value_init (tree, tsubst_flags_t); extern tree build_value_init_noctor (tree, tsubst_flags_t); extern tree get_nsdmi (tree, bool, tsubst_flags_t); extern tree build_offset_ref (tree, tree, bool, tsubst_flags_t); extern tree throw_bad_array_new_length (void); extern bool type_has_new_extended_alignment (tree); extern unsigned malloc_alignment (void); extern tree build_new (vec<tree, va_gc> **, tree, tree, vec<tree, va_gc> **, int, tsubst_flags_t); extern tree get_temp_regvar (tree, tree); extern tree build_vec_init (tree, tree, tree, bool, int, tsubst_flags_t); extern tree build_delete (tree, tree, special_function_kind, int, int, tsubst_flags_t); extern void push_base_cleanups (void); extern tree build_vec_delete (tree, tree, special_function_kind, int, tsubst_flags_t); extern tree create_temporary_var (tree); extern void initialize_vtbl_ptrs (tree); extern tree scalar_constant_value (tree); extern tree decl_really_constant_value (tree); extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool); extern tree build_vtbl_address (tree); extern bool maybe_reject_flexarray_init (tree, tree); /* in lex.c */ extern void cxx_dup_lang_specific_decl (tree); extern void yyungetc (int, int); extern tree unqualified_name_lookup_error (tree, location_t = UNKNOWN_LOCATION); extern tree unqualified_fn_lookup_error (cp_expr); extern tree make_conv_op_name (tree); extern tree build_lang_decl (enum tree_code, tree, tree); extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree); extern void retrofit_lang_decl (tree); extern void fit_decomposition_lang_decl (tree, tree); extern tree copy_decl (tree CXX_MEM_STAT_INFO); extern tree copy_type (tree CXX_MEM_STAT_INFO); extern tree cxx_make_type 
(enum tree_code); extern tree make_class_type (enum tree_code); extern const char *get_identifier_kind_name (tree); extern void set_identifier_kind (tree, cp_identifier_kind); extern bool cxx_init (void); extern void cxx_finish (void); extern bool in_main_input_context (void); /* in method.c */ extern void init_method (void); extern tree make_thunk (tree, bool, tree, tree); extern void finish_thunk (tree); extern void use_thunk (tree, bool); extern bool trivial_fn_p (tree); extern tree forward_parm (tree); extern bool is_trivially_xible (enum tree_code, tree, tree); extern bool is_xible (enum tree_code, tree, tree); extern tree get_defaulted_eh_spec (tree, tsubst_flags_t = tf_warning_or_error); extern void after_nsdmi_defaulted_late_checks (tree); extern bool maybe_explain_implicit_delete (tree); extern void explain_implicit_non_constexpr (tree); extern void deduce_inheriting_ctor (tree); extern void synthesize_method (tree); extern tree lazily_declare_fn (special_function_kind, tree); extern tree skip_artificial_parms_for (const_tree, tree); extern int num_artificial_parms_for (const_tree); extern tree make_alias_for (tree, tree); extern tree get_copy_ctor (tree, tsubst_flags_t); extern tree get_copy_assign (tree); extern tree get_default_ctor (tree); extern tree get_dtor (tree, tsubst_flags_t); extern tree strip_inheriting_ctors (tree); extern tree inherited_ctor_binfo (tree); extern bool ctor_omit_inherited_parms (tree); extern tree locate_ctor (tree); extern tree implicitly_declare_fn (special_function_kind, tree, bool, tree, tree); /* In optimize.c */ extern bool maybe_clone_body (tree); /* In parser.c */ extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool, unsigned short); extern bool parsing_nsdmi (void); extern bool parsing_default_capturing_generic_lambda_in_template (void); extern void inject_this_parameter (tree, cp_cv_quals); extern location_t defarg_location (tree); extern void maybe_show_extern_c_location (void); /* in pt.c */ 
/* Template processing entry points (the "in pt.c" section whose marker
   immediately precedes this block).  These are declarations only; the
   definitions live in pt.c.  */

/* Template parameter and parameter-list handling.  */
extern bool check_template_shadow (tree);
extern bool check_auto_in_tmpl_args (tree, tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);

/* Explicit specialization / explicit instantiation state.  */
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern void check_unqualified_spec_or_inst (tree, location_t);
extern tree check_explicit_specialization (tree, tree, int, int, tree = NULL_TREE);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);

/* 'auto' / placeholder-type support and deduction.  */
extern tree make_auto (void);
extern tree make_decltype_auto (void);
extern tree make_template_placeholder (tree);
extern bool template_placeholder_p (tree);
extern tree do_auto_deduction (tree, tree, tree, tsubst_flags_t = tf_warning_or_error, auto_deduction_context = adc_unspecified, tree = NULL_TREE, int = LOOKUP_NORMAL);
extern tree type_uses_auto (tree);
extern tree type_uses_auto_or_concept (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree, location_t);
extern tree convert_generic_types_to_packs (tree, int, int);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);

extern tree process_template_parm (tree, location_t, tree, bool, bool);
/* NOTE: end_template_parm_list is deliberately overloaded — the (tree)
   form and the (void) form are distinct entry points, not a stray
   duplicate.  */
extern tree end_template_parm_list (tree);
extern void end_template_parm_list (void);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree, tree);

/* Template lookup and instantiation.  */
extern tree lookup_template_class (tree, tree, tree, tree, int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern tree lookup_template_variable (tree, tree);
extern int uses_template_parms (tree);
extern bool uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern bool need_generic_capture (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree, const tree *, unsigned int, tree, unification_kind_t, int, bool, bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern bool maybe_instantiate_noexcept (tree, tsubst_flags_t = tf_warning_or_error);
extern tree instantiate_decl (tree, bool, bool);
extern int comp_template_parms (const_tree, const_tree);

/* Parameter packs and pack expansions.  */
extern bool builtin_pack_fn_p (tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree, tsubst_flags_t = tf_warning_or_error);
extern bool check_for_bare_parameter_packs (tree);

/* TEMPLATE_INFO and specialization queries.  */
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree, tree * = NULL, tree * = NULL, bool = false);
extern int template_args_equal (tree, tree, bool = false);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);

/* Substitution (tsubst) entry points.  */
extern tree tsubst_default_argument (tree, int, tree, tree, tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree, bool, bool);
extern tree tsubst_expr (tree, tree, tsubst_flags_t, tree, bool);
extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern bool problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation(void);
extern bool instantiating_current_function_p (void);
extern tree maybe_get_template_decl_from_type_decl (tree);
/* Variable, not a function: depth of template parameter lists currently
   being parsed.  */
extern int processing_template_parmlist;

/* Dependence predicates.  */
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool any_erroneous_template_args_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool type_dependent_object_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool instantiation_dependent_uneval_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree instantiate_non_dependent_expr (tree);
extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_or_null (tree);
extern bool variable_template_specialization_p (tree);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool dependent_alias_template_spec_p (const_tree);
extern bool explicit_class_specialization_p (tree);

/* Instantiation-context ("tinst level") stack.  */
extern bool push_tinst_level (tree);
extern bool push_tinst_level_loc (tree, location_t);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level(void);
extern void init_template_processing (void);
extern void print_template_statistics (void);

/* NOTE(review): the next two declarations lack the 'extern' keyword used
   everywhere else in this header; 'extern' is implicit for function
   declarations, so this is stylistic only, and is preserved as-is.  */
bool template_template_parameter_p (const_tree);
bool template_type_parameter_p (const_tree);
extern bool primary_template_specialization_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree, tsubst_flags_t);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
/* Deliberately overloaded: with and without a tsubst_flags_t complaint
   argument.  */
extern tree coerce_template_parms (tree, tree, tree);
extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t);
extern void register_local_specialization (tree, tree);
extern tree retrieve_local_specialization (tree);
extern tree extract_fnparm_pack (tree, tree *);
extern tree template_parm_to_arg (tree);

/* C++17 class template argument deduction guides.  */
extern tree dguide_name (tree);
extern bool dguide_name_p (tree);
extern bool deduction_guide_p (const_tree);
extern bool copy_guide_p (const_tree);
extern bool template_guide_p (const_tree);

/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */ /* A vector of all tinfo decls that haven't been emitted yet. */ extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls; extern void init_rtti_processing (void); extern tree build_typeid (tree, tsubst_flags_t); extern tree get_tinfo_decl (tree); extern tree get_typeid (tree, tsubst_flags_t); extern tree build_headof (tree); extern tree build_dynamic_cast (tree, tree, tsubst_flags_t); extern void emit_support_tinfos (void); extern bool emit_tinfo_decl (tree); /* in search.c */ extern bool accessible_base_p (tree, tree, bool); extern tree lookup_base (tree, tree, base_access, base_kind *, tsubst_flags_t); extern tree dcast_base_hint (tree, tree); extern int accessible_p (tree, tree, bool); extern int accessible_in_template_p (tree, tree); extern tree lookup_field (tree, tree, int, bool); extern tree lookup_fnfields (tree, tree, int); extern tree lookup_member (tree, tree, int, bool, tsubst_flags_t, access_failure_info *afi = NULL); extern tree lookup_member_fuzzy (tree, tree, bool); extern tree locate_field_accessor (tree, tree, bool); extern int look_for_overrides (tree, tree); extern void get_pure_virtuals (tree); extern void maybe_suppress_debug_info (tree); extern void note_debug_info_needed (tree); extern tree current_scope (void); extern int at_function_scope_p (void); extern bool at_class_scope_p (void); extern bool at_namespace_scope_p (void); extern tree context_for_name_lookup (tree); extern tree lookup_conversions (tree); extern tree binfo_from_vbase (tree); extern tree binfo_for_vbase (tree, tree); extern tree look_for_overrides_here (tree, tree); #define dfs_skip_bases ((tree)1) extern tree dfs_walk_all (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree dfs_walk_once (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree binfo_via_virtual (tree, tree); extern bool binfo_direct_p (tree); extern tree build_baselink (tree, tree, tree, tree); extern tree adjust_result_of_qualified_name_lookup 
(tree, tree, tree); extern tree copied_binfo (tree, tree); extern tree original_binfo (tree, tree); extern int shared_member_p (tree); extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ()); /* The representation of a deferred access check. */ struct GTY(()) deferred_access_check { /* The base class in which the declaration is referenced. */ tree binfo; /* The declaration whose access must be checked. */ tree decl; /* The declaration that should be used in the error message. */ tree diag_decl; /* The location of this access. */ location_t loc; }; /* in semantics.c */ extern void push_deferring_access_checks (deferring_kind); extern void resume_deferring_access_checks (void); extern void stop_deferring_access_checks (void); extern void pop_deferring_access_checks (void); extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void); extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *); extern void pop_to_parent_deferring_access_checks (void); extern bool perform_access_checks (vec<deferred_access_check, va_gc> *, tsubst_flags_t); extern bool perform_deferred_access_checks (tsubst_flags_t); extern bool perform_or_defer_access_check (tree, tree, tree, tsubst_flags_t, access_failure_info *afi = NULL); /* RAII sentinel to ensures that deferred access checks are popped before a function returns. 
*/ struct deferring_access_check_sentinel { deferring_access_check_sentinel () { push_deferring_access_checks (dk_deferred); } ~deferring_access_check_sentinel () { pop_deferring_access_checks (); } }; extern int stmts_are_full_exprs_p (void); extern void init_cp_semantics (void); extern tree do_poplevel (tree); extern void break_maybe_infinite_loop (void); extern void add_decl_expr (tree); extern tree maybe_cleanup_point_expr_void (tree); extern tree finish_expr_stmt (tree); extern tree begin_if_stmt (void); extern tree finish_if_stmt_cond (tree, tree); extern tree finish_then_clause (tree); extern void begin_else_clause (tree); extern void finish_else_clause (tree); extern void finish_if_stmt (tree); extern tree begin_while_stmt (void); extern void finish_while_stmt_cond (tree, tree, bool, unsigned short); extern void finish_while_stmt (tree); extern tree begin_do_stmt (void); extern void finish_do_body (tree); extern void finish_do_stmt (tree, tree, bool, unsigned short); extern tree finish_return_stmt (tree); extern tree begin_for_scope (tree *); extern tree begin_for_stmt (tree, tree); extern void finish_init_stmt (tree); extern void finish_for_cond (tree, tree, bool, unsigned short); extern void finish_for_expr (tree, tree); extern void finish_for_stmt (tree); extern tree begin_range_for_stmt (tree, tree); extern void finish_range_for_decl (tree, tree, tree); extern void finish_range_for_stmt (tree); extern tree finish_break_stmt (void); extern tree finish_continue_stmt (void); extern tree begin_switch_stmt (void); extern void finish_switch_cond (tree, tree); extern void finish_switch_stmt (tree); extern tree finish_goto_stmt (tree); extern tree begin_try_block (void); extern void finish_try_block (tree); extern void finish_handler_sequence (tree); extern tree begin_function_try_block (tree *); extern void finish_function_try_block (tree); extern void finish_function_handler_sequence (tree, tree); extern void finish_cleanup_try_block (tree); extern tree 
begin_handler (void); extern void finish_handler_parms (tree, tree); extern void finish_handler (tree); extern void finish_cleanup (tree, tree); extern bool is_this_parameter (tree); enum { BCS_NORMAL = 0, BCS_NO_SCOPE = 1, BCS_TRY_BLOCK = 2, BCS_FN_BODY = 4, BCS_TRANSACTION = 8 }; extern tree begin_compound_stmt (unsigned int); extern void finish_compound_stmt (tree); extern tree finish_asm_stmt (int, tree, tree, tree, tree, tree); extern tree finish_label_stmt (tree); extern void finish_label_decl (tree); extern cp_expr finish_parenthesized_expr (cp_expr); extern tree force_paren_expr (tree); extern tree maybe_undo_parenthesized_ref (tree); extern tree finish_non_static_data_member (tree, tree, tree); extern tree begin_stmt_expr (void); extern tree finish_stmt_expr_expr (tree, tree); extern tree finish_stmt_expr (tree, bool); extern tree stmt_expr_value_expr (tree); bool empty_expr_stmt_p (tree); extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *, tsubst_flags_t); extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool, bool, tsubst_flags_t); extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error); extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error); extern cp_expr finish_increment_expr (cp_expr, enum tree_code); extern tree finish_this_expr (void); extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t); extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr, tsubst_flags_t); /* Whether this call to finish_compound_literal represents a C++11 functional cast or a C99 compound literal. 
*/ enum fcl_t { fcl_functional, fcl_c99 }; extern tree finish_compound_literal (tree, tree, tsubst_flags_t, fcl_t = fcl_functional); extern tree finish_fname (tree); extern void finish_translation_unit (void); extern tree finish_template_type_parm (tree, tree); extern tree finish_template_template_parm (tree, tree); extern tree begin_class_definition (tree); extern void finish_template_decl (tree); extern tree finish_template_type (tree, tree, int); extern tree finish_base_specifier (tree, tree, bool); extern void finish_member_declaration (tree); extern bool outer_automatic_var_p (tree); extern tree process_outer_var_ref (tree, tsubst_flags_t, bool force_use = false); extern cp_expr finish_id_expression (tree, tree, tree, cp_id_kind *, bool, bool, bool *, bool, bool, bool, bool, const char **, location_t); extern tree finish_typeof (tree); extern tree finish_underlying_type (tree); extern tree calculate_bases (tree, tsubst_flags_t); extern tree finish_bases (tree, bool); extern tree calculate_direct_bases (tree, tsubst_flags_t); extern tree finish_offsetof (tree, tree, location_t); extern void finish_decl_cleanup (tree, tree); extern void finish_eh_cleanup (tree); extern void emit_associated_thunks (tree); extern void finish_mem_initializers (tree); extern tree check_template_template_default_arg (tree); extern bool expand_or_defer_fn_1 (tree); extern void expand_or_defer_fn (tree); extern void add_typedef_to_current_template_for_access_check (tree, tree, location_t); extern void check_accessibility_of_qualified_id (tree, tree, tree); extern tree finish_qualified_id_expr (tree, tree, bool, bool, bool, bool, tsubst_flags_t); extern void simplify_aggr_init_expr (tree *); extern void finalize_nrv (tree *, tree, tree); extern tree omp_reduction_id (enum tree_code, tree, tree); extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *); extern void cp_check_omp_declare_reduction (tree); extern void finish_omp_declare_simd_methods (tree); extern tree 
finish_omp_clauses (tree, enum c_omp_region_type); extern tree push_omp_privatization_clauses (bool); extern void pop_omp_privatization_clauses (tree); extern void save_omp_privatization_clauses (vec<tree> &); extern void restore_omp_privatization_clauses (vec<tree> &); extern void finish_omp_threadprivate (tree); extern tree begin_omp_structured_block (void); extern tree finish_omp_structured_block (tree); extern tree finish_oacc_data (tree, tree); extern tree finish_oacc_host_data (tree, tree); extern tree finish_omp_construct (enum tree_code, tree, tree); extern tree begin_omp_parallel (void); extern tree finish_omp_parallel (tree, tree); extern tree begin_omp_task (void); extern tree finish_omp_task (tree, tree); extern tree finish_omp_for (location_t, enum tree_code, tree, tree, tree, tree, tree, tree, tree, vec<tree> *, tree); extern void finish_omp_atomic (enum tree_code, enum tree_code, tree, tree, tree, tree, tree, bool); extern void finish_omp_barrier (void); extern void finish_omp_flush (void); extern void finish_omp_taskwait (void); extern void finish_omp_taskyield (void); extern void finish_omp_cancel (tree); extern void finish_omp_cancellation_point (tree); extern tree omp_privatize_field (tree, bool); extern tree begin_transaction_stmt (location_t, tree *, int); extern void finish_transaction_stmt (tree, tree, int, tree); extern tree build_transaction_expr (location_t, tree, int, tree); extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool, bool); extern tree baselink_for_fns (tree); extern void finish_static_assert (tree, tree, location_t, bool); extern tree finish_decltype_type (tree, bool, tsubst_flags_t); extern tree finish_trait_expr (enum cp_trait_kind, tree, tree); extern tree build_lambda_expr (void); extern tree build_lambda_object (tree); extern tree begin_lambda_type (tree); extern tree lambda_capture_field_type (tree, bool, bool); extern tree lambda_return_type (tree); extern tree lambda_proxy_type (tree); extern tree 
lambda_function (tree); extern void apply_deduced_return_type (tree, tree); extern tree add_capture (tree, tree, tree, bool, bool); extern tree add_default_capture (tree, tree, tree); extern void insert_capture_proxy (tree); extern void insert_pending_capture_proxies (void); extern bool is_capture_proxy (tree); extern bool is_normal_capture_proxy (tree); extern bool is_constant_capture_proxy (tree); extern void register_capture_members (tree); extern tree lambda_expr_this_capture (tree, bool); extern void maybe_generic_this_capture (tree, tree); extern tree maybe_resolve_dummy (tree, bool); extern tree current_nonlambda_function (void); extern tree nonlambda_method_basetype (void); extern tree current_nonlambda_scope (void); extern tree current_lambda_expr (void); extern bool generic_lambda_fn_p (tree); extern tree do_dependent_capture (tree, bool = false); extern bool lambda_fn_in_template_p (tree); extern void maybe_add_lambda_conv_op (tree); extern bool is_lambda_ignored_entity (tree); extern bool lambda_static_thunk_p (tree); extern tree finish_builtin_launder (location_t, tree, tsubst_flags_t); extern void start_lambda_scope (tree); extern void record_lambda_scope (tree); extern void finish_lambda_scope (void); extern tree start_lambda_function (tree fn, tree lambda_expr); extern void finish_lambda_function (tree body); /* in tree.c */ extern int cp_tree_operand_length (const_tree); extern int cp_tree_code_length (enum tree_code); extern void cp_free_lang_data (tree t); extern tree force_target_expr (tree, tree, tsubst_flags_t); extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t); extern void lang_check_failed (const char *, int, const char *) ATTRIBUTE_NORETURN ATTRIBUTE_COLD; extern tree stabilize_expr (tree, tree *); extern void stabilize_call (tree, tree *); extern bool stabilize_init (tree, tree *); extern tree add_stmt_to_compound (tree, tree); extern void init_tree (void); extern bool pod_type_p (const_tree); extern bool 
layout_pod_type_p (const_tree); extern bool std_layout_type_p (const_tree); extern bool trivial_type_p (const_tree); extern bool trivially_copyable_p (const_tree); extern bool type_has_unique_obj_representations (const_tree); extern bool scalarish_type_p (const_tree); extern bool type_has_nontrivial_default_init (const_tree); extern bool type_has_nontrivial_copy_init (const_tree); extern void maybe_warn_parm_abi (tree, location_t); extern bool class_tmpl_impl_spec_p (const_tree); extern int zero_init_p (const_tree); extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree); extern bool check_abi_tag_args (tree, tree); extern tree strip_typedefs (tree, bool * = NULL); extern tree strip_typedefs_expr (tree, bool * = NULL); extern tree copy_binfo (tree, tree, tree, tree *, int); extern int member_p (const_tree); extern cp_lvalue_kind real_lvalue_p (const_tree); extern cp_lvalue_kind lvalue_kind (const_tree); extern bool glvalue_p (const_tree); extern bool obvalue_p (const_tree); extern bool xvalue_p (const_tree); extern bool bitfield_p (const_tree); extern tree cp_stabilize_reference (tree); extern bool builtin_valid_in_constant_expr_p (const_tree); extern tree build_min (enum tree_code, tree, ...); extern tree build_min_nt_loc (location_t, enum tree_code, ...); extern tree build_min_non_dep (enum tree_code, tree, ...); extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...); extern tree build_min_nt_call_vec (tree, vec<tree, va_gc> *); extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *); extern vec<tree, va_gc>* vec_copy_and_insert (vec<tree, va_gc>*, tree, unsigned); extern tree build_cplus_new (tree, tree, tsubst_flags_t); extern tree build_aggr_init_expr (tree, tree); extern tree get_target_expr (tree); extern tree get_target_expr_sfinae (tree, tsubst_flags_t); extern tree build_cplus_array_type (tree, tree); extern tree build_array_of_n_type (tree, int); extern bool array_of_runtime_bound_p (tree); extern 
tree build_array_copy (tree); extern tree build_vec_init_expr (tree, tree, tsubst_flags_t); extern void diagnose_non_constexpr_vec_init (tree); extern tree hash_tree_cons (tree, tree, tree); extern tree hash_tree_chain (tree, tree); extern tree build_qualified_name (tree, tree, tree, bool); extern tree build_ref_qualified_type (tree, cp_ref_qualifier); inline tree ovl_first (tree) ATTRIBUTE_PURE; extern tree ovl_make (tree fn, tree next = NULL_TREE); extern tree ovl_insert (tree fn, tree maybe_ovl, bool using_p = false); extern tree ovl_skip_hidden (tree) ATTRIBUTE_PURE; extern void lookup_mark (tree lookup, bool val); extern tree lookup_add (tree fns, tree lookup); extern tree lookup_maybe_add (tree fns, tree lookup, bool deduping); extern void lookup_keep (tree lookup, bool keep); extern void lookup_list_keep (tree list, bool keep); extern int is_overloaded_fn (tree) ATTRIBUTE_PURE; extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE; extern tree dependent_name (tree); extern tree get_fns (tree) ATTRIBUTE_PURE; extern tree get_first_fn (tree) ATTRIBUTE_PURE; extern tree ovl_scope (tree); extern const char *cxx_printable_name (tree, int); extern const char *cxx_printable_name_translate (tree, int); extern tree canonical_eh_spec (tree); extern tree build_exception_variant (tree, tree); extern tree bind_template_template_parm (tree, tree); extern tree array_type_nelts_total (tree); extern tree array_type_nelts_top (tree); extern tree break_out_target_exprs (tree, bool = false); extern tree build_ctor_subob_ref (tree, tree, tree); extern tree replace_placeholders (tree, tree, bool * = NULL); extern bool find_placeholders (tree); extern tree get_type_decl (tree); extern tree decl_namespace_context (tree); extern bool decl_anon_ns_mem_p (const_tree); extern tree lvalue_type (tree); extern tree error_type (tree); extern int varargs_function_p (const_tree); extern bool cp_tree_equal (tree, tree); extern tree no_linkage_check (tree, bool); extern void debug_binfo 
(tree); extern tree build_dummy_object (tree); extern tree maybe_dummy_object (tree, tree *); extern int is_dummy_object (const_tree); extern const struct attribute_spec cxx_attribute_table[]; extern tree make_ptrmem_cst (tree, tree); extern tree cp_build_type_attribute_variant (tree, tree); extern tree cp_build_reference_type (tree, bool); extern tree move (tree); extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t); #define cp_build_qualified_type(TYPE, QUALS) \ cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error) extern bool cv_qualified_p (const_tree); extern tree cv_unqualified (tree); extern special_function_kind special_function_p (const_tree); extern int count_trees (tree); extern int char_type_p (tree); extern void verify_stmt_tree (tree); extern linkage_kind decl_linkage (tree); extern duration_kind decl_storage_duration (tree); extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn, void*, hash_set<tree> *); #define cp_walk_tree(tp,func,data,pset) \ walk_tree_1 (tp, func, data, pset, cp_walk_subtrees) #define cp_walk_tree_without_duplicates(tp,func,data) \ walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees) extern tree rvalue (tree); extern tree convert_bitfield_to_declared_type (tree); extern tree cp_save_expr (tree); extern bool cast_valid_in_integral_constant_expression_p (tree); extern bool cxx_type_hash_eq (const_tree, const_tree); extern tree cxx_copy_lang_qualifiers (const_tree, const_tree); extern void cxx_print_statistics (void); extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t); extern void cp_warn_deprecated_use (tree); /* in ptree.c */ extern void cxx_print_xnode (FILE *, tree, int); extern void cxx_print_decl (FILE *, tree, int); extern void cxx_print_type (FILE *, tree, int); extern void cxx_print_identifier (FILE *, tree, int); extern void cxx_print_error_function (diagnostic_context *, const char *, struct diagnostic_info *); /* in typeck.c */ extern bool 
cxx_mark_addressable (tree, bool = false); extern int string_conv_p (const_tree, const_tree, int); extern tree cp_truthvalue_conversion (tree); extern tree condition_conversion (tree); extern tree require_complete_type (tree); extern tree require_complete_type_sfinae (tree, tsubst_flags_t); extern tree complete_type (tree); extern tree complete_type_or_else (tree, tree); extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t); inline bool type_unknown_p (const_tree); enum { ce_derived, ce_type, ce_normal, ce_exact }; extern bool comp_except_specs (const_tree, const_tree, int); extern bool comptypes (tree, tree, int); extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree); extern bool compparms (const_tree, const_tree); extern int comp_cv_qualification (const_tree, const_tree); extern int comp_cv_qualification (int, int); extern int comp_cv_qual_signature (tree, tree); extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool); extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool, bool); extern tree cxx_alignas_expr (tree); extern tree cxx_sizeof_nowarn (tree); extern tree is_bitfield_expr_with_lowered_type (const_tree); extern tree unlowered_expr_type (const_tree); extern tree decay_conversion (tree, tsubst_flags_t, bool = true); extern tree build_class_member_access_expr (cp_expr, tree, tree, bool, tsubst_flags_t); extern tree finish_class_member_access_expr (cp_expr, tree, bool, tsubst_flags_t); extern tree build_x_indirect_ref (location_t, tree, ref_operator, tsubst_flags_t); extern tree cp_build_indirect_ref (tree, ref_operator, tsubst_flags_t); extern tree cp_build_fold_indirect_ref (tree); extern tree build_array_ref (location_t, tree, tree); extern tree cp_build_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t); extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...) 
ATTRIBUTE_SENTINEL; extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **, tsubst_flags_t); extern tree build_x_binary_op (location_t, enum tree_code, tree, enum tree_code, tree, enum tree_code, tree *, tsubst_flags_t); extern tree build_x_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree build_x_unary_op (location_t, enum tree_code, cp_expr, tsubst_flags_t); extern tree cp_build_addressof (location_t, tree, tsubst_flags_t); extern tree cp_build_addr_expr (tree, tsubst_flags_t); extern tree cp_build_unary_op (enum tree_code, tree, bool, tsubst_flags_t); extern tree genericize_compound_lvalue (tree); extern tree unary_complex_lvalue (enum tree_code, tree); extern tree build_x_conditional_expr (location_t, tree, tree, tree, tsubst_flags_t); extern tree build_x_compound_expr_from_list (tree, expr_list_kind, tsubst_flags_t); extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *, const char *, tsubst_flags_t); extern tree build_x_compound_expr (location_t, tree, tree, tsubst_flags_t); extern tree build_compound_expr (location_t, tree, tree); extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t); extern tree build_static_cast (tree, tree, tsubst_flags_t); extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t); extern tree build_const_cast (tree, tree, tsubst_flags_t); extern tree build_c_cast (location_t, tree, tree); extern cp_expr build_c_cast (location_t loc, tree type, cp_expr expr); extern tree cp_build_c_cast (tree, tree, tsubst_flags_t); extern cp_expr build_x_modify_expr (location_t, tree, enum tree_code, tree, tsubst_flags_t); extern tree cp_build_modify_expr (location_t, tree, enum tree_code, tree, tsubst_flags_t); extern tree convert_for_initialization (tree, tree, tree, int, impl_conv_rhs, tree, int, tsubst_flags_t); extern int comp_ptr_ttypes (tree, tree); extern bool comp_ptr_ttypes_const (tree, tree); extern bool error_type_p (const_tree); extern bool ptr_reasonably_similar (const_tree, const_tree); 
extern tree build_ptrmemfunc (tree, tree, int, bool, tsubst_flags_t); extern int cp_type_quals (const_tree); extern int type_memfn_quals (const_tree); extern cp_ref_qualifier type_memfn_rqual (const_tree); extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier); extern bool cp_has_mutable_p (const_tree); extern bool at_least_as_qualified_p (const_tree, const_tree); extern void cp_apply_type_quals_to_decl (int, tree); extern tree build_ptrmemfunc1 (tree, tree, tree); extern void expand_ptrmemfunc_cst (tree, tree *, tree *); extern tree type_after_usual_arithmetic_conversions (tree, tree); extern tree common_pointer_type (tree, tree); extern tree composite_pointer_type (tree, tree, tree, tree, composite_pointer_operation, tsubst_flags_t); extern tree merge_types (tree, tree); extern tree strip_array_domain (tree); extern tree check_return_expr (tree, bool *); extern tree cp_build_binary_op (location_t, enum tree_code, tree, tree, tsubst_flags_t); extern tree build_x_vec_perm_expr (location_t, tree, tree, tree, tsubst_flags_t); #define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, false, true) extern tree build_simple_component_ref (tree, tree); extern tree build_ptrmemfunc_access_expr (tree, tree); extern tree build_address (tree); extern tree build_nop (tree, tree); extern tree non_reference (tree); extern tree lookup_anon_field (tree, tree); extern bool invalid_nonstatic_memfn_p (location_t, tree, tsubst_flags_t); extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t); extern tree convert_ptrmem (tree, tree, bool, bool, tsubst_flags_t); extern int lvalue_or_else (tree, enum lvalue_use, tsubst_flags_t); extern void check_template_keyword (tree); extern bool check_raw_literal_operator (const_tree decl); extern bool check_literal_operator_args (const_tree, bool *, bool *); extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t); extern tree cp_perform_integral_promotions (tree, tsubst_flags_t); extern tree 
finish_left_unary_fold_expr (tree, int); extern tree finish_right_unary_fold_expr (tree, int); extern tree finish_binary_fold_expr (tree, tree, int); /* in typeck2.c */ extern void require_complete_eh_spec_types (tree, tree); extern void cxx_incomplete_type_diagnostic (location_t, const_tree, const_tree, diagnostic_t); inline void cxx_incomplete_type_diagnostic (const_tree value, const_tree type, diagnostic_t diag_kind) { cxx_incomplete_type_diagnostic (EXPR_LOC_OR_LOC (value, input_location), value, type, diag_kind); } extern void cxx_incomplete_type_error (location_t, const_tree, const_tree); inline void cxx_incomplete_type_error (const_tree value, const_tree type) { cxx_incomplete_type_diagnostic (value, type, DK_ERROR); } extern void cxx_incomplete_type_inform (const_tree); extern tree error_not_base_type (tree, tree); extern tree binfo_or_else (tree, tree); extern void cxx_readonly_error (tree, enum lvalue_use); extern void complete_type_check_abstract (tree); extern int abstract_virtuals_error (tree, tree); extern int abstract_virtuals_error (abstract_class_use, tree); extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t); extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t); extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int); extern tree split_nonconstant_init (tree, tree); extern bool check_narrowing (tree, tree, tsubst_flags_t); extern tree digest_init (tree, tree, tsubst_flags_t); extern tree digest_init_flags (tree, tree, int, tsubst_flags_t); extern tree digest_nsdmi_init (tree, tree, tsubst_flags_t); extern tree build_scoped_ref (tree, tree, tree *); extern tree build_x_arrow (location_t, tree, tsubst_flags_t); extern tree build_m_component_ref (tree, tree, tsubst_flags_t); extern tree build_functional_cast (tree, tree, tsubst_flags_t); extern tree add_exception_specifier (tree, tree, int); extern tree merge_exception_specifiers (tree, tree); /* in mangle.c */ extern void init_mangle 
(void); extern void mangle_decl (tree); extern const char *mangle_type_string (tree); extern tree mangle_typeinfo_for_type (tree); extern tree mangle_typeinfo_string_for_type (tree); extern tree mangle_vtbl_for_type (tree); extern tree mangle_vtt_for_type (tree); extern tree mangle_ctor_vtbl_for_type (tree, tree); extern tree mangle_thunk (tree, int, tree, tree, tree); extern tree mangle_guard_variable (tree); extern tree mangle_tls_init_fn (tree); extern tree mangle_tls_wrapper_fn (tree); extern bool decl_tls_wrapper_p (tree); extern tree mangle_ref_init_variable (tree); extern char * get_mangled_vtable_map_var_name (tree); extern bool mangle_return_type_p (tree); extern tree mangle_decomp (tree, vec<tree> &); /* in dump.c */ extern bool cp_dump_tree (void *, tree); /* In cp/cp-objcp-common.c. */ extern alias_set_type cxx_get_alias_set (tree); extern bool cxx_warn_unused_global_decl (const_tree); extern size_t cp_tree_size (enum tree_code); extern bool cp_var_mod_type_p (tree, tree); extern void cxx_initialize_diagnostics (diagnostic_context *); extern int cxx_types_compatible_p (tree, tree); extern void init_shadowed_var_for_decl (void); extern bool cxx_block_may_fallthru (const_tree); /* in cp-gimplify.c */ extern int cp_gimplify_expr (tree *, gimple_seq *, gimple_seq *); extern void cp_genericize (tree); extern bool cxx_omp_const_qual_no_mutable (tree); extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree); extern tree cxx_omp_clause_default_ctor (tree, tree, tree); extern tree cxx_omp_clause_copy_ctor (tree, tree, tree); extern tree cxx_omp_clause_assign_op (tree, tree, tree); extern tree cxx_omp_clause_dtor (tree, tree); extern void cxx_omp_finish_clause (tree, gimple_seq *); extern bool cxx_omp_privatize_by_reference (const_tree); extern bool cxx_omp_disregard_value_expr (tree, bool); extern void cp_fold_function (tree); extern tree cp_fully_fold (tree); extern void clear_fold_cache (void); /* in name-lookup.c */ extern void 
suggest_alternatives_for (location_t, tree, bool); extern bool suggest_alternative_in_explicit_scope (location_t, tree, tree); extern tree strip_using_decl (tree); /* Tell the binding oracle what kind of binding we are looking for. */ enum cp_oracle_request { CP_ORACLE_IDENTIFIER }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. */ typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier); extern cp_binding_oracle_function *cp_binding_oracle; /* in constraint.cc */ extern void init_constraint_processing (); extern bool constraint_p (tree); extern tree conjoin_constraints (tree, tree); extern tree conjoin_constraints (tree); extern tree get_constraints (tree); extern void set_constraints (tree, tree); extern void remove_constraints (tree); extern tree current_template_constraints (void); extern tree associate_classtype_constraints (tree); extern tree build_constraints (tree, tree); extern tree get_shorthand_constraints (tree); extern tree build_concept_check (tree, tree, tree = NULL_TREE); extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE); extern tree make_constrained_auto (tree, tree); extern void placeholder_extract_concept_and_args (tree, tree&, tree&); extern bool equivalent_placeholder_constraints (tree, tree); extern hashval_t hash_placeholder_constraint (tree); extern bool deduce_constrained_parameter (tree, tree&, tree&); extern tree resolve_constraint_check (tree); extern tree check_function_concept (tree); extern tree finish_template_introduction (tree, tree); extern bool valid_requirements_p (tree); extern tree finish_concept_name (tree); extern tree 
finish_shorthand_constraint (tree, tree); extern tree finish_requires_expr (tree, tree); extern tree finish_simple_requirement (tree); extern tree finish_type_requirement (tree); extern tree finish_compound_requirement (tree, tree, bool); extern tree finish_nested_requirement (tree); extern void check_constrained_friend (tree, tree); extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree); extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree); extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree); extern bool function_concept_check_p (tree); extern tree normalize_expression (tree); extern tree expand_concept (tree, tree); extern bool expanding_concept (); extern tree evaluate_constraints (tree, tree); extern tree evaluate_function_concept (tree, tree); extern tree evaluate_variable_concept (tree, tree); extern tree evaluate_constraint_expression (tree, tree); extern bool constraints_satisfied_p (tree); extern bool constraints_satisfied_p (tree, tree); extern tree lookup_constraint_satisfaction (tree, tree); extern tree memoize_constraint_satisfaction (tree, tree, tree); extern tree lookup_concept_satisfaction (tree, tree); extern tree memoize_concept_satisfaction (tree, tree, tree); extern tree get_concept_expansion (tree, tree); extern tree save_concept_expansion (tree, tree, tree); extern bool* lookup_subsumption_result (tree, tree); extern bool save_subsumption_result (tree, tree, bool); extern bool equivalent_constraints (tree, tree); extern bool equivalently_constrained (tree, tree); extern bool subsumes_constraints (tree, tree); extern bool strictly_subsumes (tree, tree); extern int more_constrained (tree, tree); extern void diagnose_constraints (location_t, tree, tree); /* in logic.cc */ extern tree decompose_conclusions (tree); extern bool subsumes (tree, tree); /* In class.c */ extern void cp_finish_injected_record_type (tree); /* in vtable-class-hierarchy.c */ extern void 
vtv_compute_class_hierarchy_transitive_closure (void); extern void vtv_generate_init_routine (void); extern void vtv_save_class_info (tree); extern void vtv_recover_class_info (void); extern void vtv_build_vtable_verify_fndecl (void); /* In constexpr.c */ extern void fini_constexpr (void); extern bool literal_type_p (tree); extern tree register_constexpr_fundef (tree, tree); extern bool is_valid_constexpr_fn (tree, bool); extern bool check_constexpr_ctor_body (tree, tree, bool); extern tree constexpr_fn_retval (tree); extern tree ensure_literal_type_for_constexpr_object (tree); extern bool potential_constant_expression (tree); extern bool is_constant_expression (tree); extern bool is_nondependent_constant_expression (tree); extern bool is_nondependent_static_init_expression (tree); extern bool is_static_init_expression (tree); extern bool potential_rvalue_constant_expression (tree); extern bool require_potential_constant_expression (tree); extern bool require_constant_expression (tree); extern bool require_rvalue_constant_expression (tree); extern bool require_potential_rvalue_constant_expression (tree); extern tree cxx_constant_value (tree, tree = NULL_TREE); extern tree cxx_constant_init (tree, tree = NULL_TREE); extern tree maybe_constant_value (tree, tree = NULL_TREE); extern tree maybe_constant_init (tree, tree = NULL_TREE); extern tree fold_non_dependent_expr (tree); extern tree fold_simple (tree); extern bool is_sub_constant_expr (tree); extern bool reduced_constant_expression_p (tree); extern bool is_instantiation_of_constexpr (tree); extern bool var_in_constexpr_fn (tree); extern bool var_in_maybe_constexpr_fn (tree); extern void explain_invalid_constexpr_fn (tree); extern vec<tree> cx_error_context (void); extern tree fold_sizeof_expr (tree); extern void clear_cv_and_fold_caches (void); /* In cp-ubsan.c */ extern void cp_ubsan_maybe_instrument_member_call (tree); extern void cp_ubsan_instrument_member_accesses (tree *); extern tree 
cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree);
extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree);
extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree);

/* Inline bodies. */

/* Peel away any OVERLOAD wrapper nodes around NODE and return the
   first underlying declaration in the overload set.  */
inline tree
ovl_first (tree node)
{
  while (TREE_CODE (node) == OVERLOAD)
    node = OVL_FUNCTION (node);
  return node;
}

/* True iff EXPR's type is the sentinel unknown_type_node.  */
inline bool
type_unknown_p (const_tree expr)
{
  return TREE_TYPE (expr) == unknown_type_node;
}

/* Hash a DECL or OVERLOAD by the identifier it is known as; entries
   with no OVL_NAME hash to 0.  */
inline hashval_t
named_decl_hash::hash (const value_type decl)
{
  tree name = OVL_NAME (decl);
  return name ? IDENTIFIER_HASH_VALUE (name) : 0;
}

/* An EXISTING table entry matches a CANDIDATE identifier when the
   entry's name is exactly that identifier node.  */
inline bool
named_decl_hash::equal (const value_type existing, compare_type candidate)
{
  tree name = OVL_NAME (existing);
  return candidate == name;
}

/* True iff EXPR is `null_node', looking through any location wrapper
   around it.  */
inline bool
null_node_p (const_tree expr)
{
  STRIP_ANY_LOCATION_WRAPPER (expr);
  return expr == null_node;
}

#if CHECKING_P
namespace selftest {
extern void run_cp_tests (void);

/* Declarations for specific families of tests within cp, by source
   file, in alphabetical order.  */
extern void cp_pt_c_tests ();
extern void cp_tree_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */

/* -- end of C++ */

#endif /* ! GCC_CP_TREE_H */
/* cp-tree.h */
/* Definitions for C++ parsing and type checking. Copyright (C) 1987-2018 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_CP_TREE_H #define GCC_CP_TREE_H #include "tm.h" #include "hard-reg-set.h" #include "function.h" /* In order for the format checking to accept the C++ front end diagnostic framework extensions, you must include this file before diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE in c-common.h. */ #undef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cxxdiag__ #if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H) #error \ In order for the format checking to accept the C++ front end diagnostic \ framework extensions, you must include this file before diagnostic-core.h and \ c-common.h, not after. #endif #include "c-family/c-common.h" #include "diagnostic.h" /* A tree node, together with a location, so that we can track locations (and ranges) during parsing. The location is redundant for node kinds that have locations, but not all node kinds do (e.g. constants, and references to params, locals, etc), so we stash a copy here. 
*/

/* Pairs a tree VALUE with a cached source location/range, so the parser
   can track locations even for node kinds that do not store one
   themselves (see the comment preceding this class).  Converts
   implicitly to and from `tree'.  */
class cp_expr
{
public:
  /* Empty wrapper: no value, no location.  */
  cp_expr () : m_value (NULL), m_loc (UNKNOWN_LOCATION) {}

  /* Wrap VALUE, caching whatever location the node itself carries.  */
  cp_expr (tree value) : m_value (value), m_loc (EXPR_LOCATION (m_value)) {}

  /* Wrap VALUE with an explicitly supplied location LOC.  */
  cp_expr (tree value, location_t loc): m_value (value), m_loc (loc) {}

  cp_expr (const cp_expr &other) : m_value (other.m_value), m_loc (other.m_loc) {}

  /* Implicit conversions to tree. */
  operator tree () const { return m_value; }
  tree & operator* () { return m_value; }
  tree operator* () const { return m_value; }
  tree & operator-> () { return m_value; }
  tree operator-> () const { return m_value; }

  tree get_value () const { return m_value; }
  location_t get_location () const { return m_loc; }

  /* Start of the source range covered by the cached location.  */
  location_t get_start () const
  {
    source_range src_range = get_range_from_loc (line_table, m_loc);
    return src_range.m_start;
  }

  /* End of the source range covered by the cached location.  */
  location_t get_finish () const
  {
    source_range src_range = get_range_from_loc (line_table, m_loc);
    return src_range.m_finish;
  }

  /* Update both the location stored on the tree node (where possible)
     and the cached copy, keeping the two in sync.  */
  void set_location (location_t loc)
  {
    protected_set_expr_location (m_value, loc);
    m_loc = loc;
  }

  /* Expand the location to the range [START, FINISH], keeping the
     current location as the caret.  */
  void set_range (location_t start, location_t finish)
  {
    set_location (make_location (m_loc, start, finish));
  }

  /* Wrap the value in a location wrapper node (via
     maybe_wrap_with_location) carrying the cached location.  */
  cp_expr& maybe_add_location_wrapper ()
  {
    m_value = maybe_wrap_with_location (m_value, m_loc);
    return *this;
  }

private:
  tree m_value;     /* The wrapped tree node.  */
  location_t m_loc; /* Cached location (redundant for nodes that store one).  */
};

/* Compare a cp_expr against a bare tree by its wrapped value.  */
inline bool
operator == (const cp_expr &lhs, tree rhs)
{
  return lhs.get_value () == rhs;
}

/* Indices into the cp_global_trees array (see the accessor macros that
   follow the array's declaration).  */
enum cp_tree_index
{
  CPTI_WCHAR_DECL,
  CPTI_VTABLE_ENTRY_TYPE,
  CPTI_DELTA_TYPE,
  CPTI_VTABLE_INDEX_TYPE,
  CPTI_CLEANUP_TYPE,
  CPTI_VTT_PARM_TYPE,
  CPTI_CLASS_TYPE,
  CPTI_UNKNOWN_TYPE,
  CPTI_INIT_LIST_TYPE,
  CPTI_VTBL_TYPE,
  CPTI_VTBL_PTR_TYPE,
  CPTI_STD,
  CPTI_ABI,
  CPTI_GLOBAL,
  CPTI_GLOBAL_TYPE,
  CPTI_CONST_TYPE_INFO_TYPE,
  CPTI_TYPE_INFO_PTR_TYPE,
  CPTI_ABORT_FNDECL,
  CPTI_AGGR_TAG,
  CPTI_CONV_OP_MARKER,

  CPTI_CTOR_IDENTIFIER,
  CPTI_COMPLETE_CTOR_IDENTIFIER,
  CPTI_BASE_CTOR_IDENTIFIER,
  CPTI_DTOR_IDENTIFIER,
  CPTI_COMPLETE_DTOR_IDENTIFIER,
  CPTI_BASE_DTOR_IDENTIFIER,
  CPTI_DELETING_DTOR_IDENTIFIER,
  CPTI_CONV_OP_IDENTIFIER,
  CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER, CPTI_VTT_PARM_IDENTIFIER, CPTI_THIS_IDENTIFIER, CPTI_PFN_IDENTIFIER, CPTI_VPTR_IDENTIFIER, CPTI_GLOBAL_IDENTIFIER, CPTI_STD_IDENTIFIER, CPTI_ANON_IDENTIFIER, CPTI_AUTO_IDENTIFIER, CPTI_DECLTYPE_AUTO_IDENTIFIER, CPTI_INIT_LIST_IDENTIFIER, CPTI_LANG_NAME_C, CPTI_LANG_NAME_CPLUSPLUS, CPTI_EMPTY_EXCEPT_SPEC, CPTI_NOEXCEPT_TRUE_SPEC, CPTI_NOEXCEPT_FALSE_SPEC, CPTI_NOEXCEPT_DEFERRED_SPEC, CPTI_TERMINATE_FN, CPTI_CALL_UNEXPECTED_FN, CPTI_GET_EXCEPTION_PTR_FN, CPTI_BEGIN_CATCH_FN, CPTI_END_CATCH_FN, CPTI_ALLOCATE_EXCEPTION_FN, CPTI_FREE_EXCEPTION_FN, CPTI_THROW_FN, CPTI_RETHROW_FN, CPTI_ATEXIT_FN_PTR_TYPE, CPTI_ATEXIT, CPTI_DSO_HANDLE, CPTI_DCAST, CPTI_NULLPTR, CPTI_NULLPTR_TYPE, CPTI_ALIGN_TYPE, CPTI_ANY_TARG, CPTI_MAX }; extern GTY(()) tree cp_global_trees[CPTI_MAX]; #define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL] #define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE] /* The type used to represent an offset by which to adjust the `this' pointer in pointer-to-member types. */ #define delta_type_node cp_global_trees[CPTI_DELTA_TYPE] /* The type used to represent an index into the vtable. 
*/ #define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE] #define class_type_node cp_global_trees[CPTI_CLASS_TYPE] #define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE] #define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE] #define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE] #define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE] #define std_node cp_global_trees[CPTI_STD] #define abi_node cp_global_trees[CPTI_ABI] #define global_namespace cp_global_trees[CPTI_GLOBAL] #define global_type_node cp_global_trees[CPTI_GLOBAL_TYPE] #define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE] #define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE] #define conv_op_marker cp_global_trees[CPTI_CONV_OP_MARKER] #define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL] #define current_aggr cp_global_trees[CPTI_AGGR_TAG] #define nullptr_node cp_global_trees[CPTI_NULLPTR] #define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE] /* std::align_val_t */ #define align_type_node cp_global_trees[CPTI_ALIGN_TYPE] /* We cache these tree nodes so as to call get_identifier less frequently. For identifiers for functions, including special member functions such as ctors and assignment operators, the nodes can be used (among other things) to iterate over their overloads defined by/for a type. For example: tree ovlid = assign_op_identifier; tree overloads = get_class_binding (type, ovlid); for (ovl_iterator it (overloads); it; ++it) { ... } iterates over the set of implicitly and explicitly defined overloads of the assignment operator for type (including the copy and move assignment operators, whether deleted or not). */ /* The name of a constructor that takes an in-charge parameter to decide whether or not to construct virtual base classes. */ #define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER] /* The name of a constructor that constructs virtual base classes. 
*/ #define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER] /* The name of a constructor that does not construct virtual base classes. */ #define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER] /* The name of a destructor that takes an in-charge parameter to decide whether or not to destroy virtual base classes and whether or not to delete the object. */ #define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes. */ #define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER] /* The name of a destructor that does not destroy virtual base classes. */ #define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes, and then deletes the entire object. */ #define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER] #define ovl_op_identifier(ISASS, CODE) (OVL_OP_INFO(ISASS, CODE)->identifier) #define assign_op_identifier (ovl_op_info[true][OVL_OP_NOP_EXPR].identifier) #define call_op_identifier (ovl_op_info[false][OVL_OP_CALL_EXPR].identifier) /* The name used for conversion operators -- but note that actual conversion functions use special identifiers outside the identifier table. */ #define conv_op_identifier cp_global_trees[CPTI_CONV_OP_IDENTIFIER] #define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER] #define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER] /* The name of the parameter that contains a pointer to the VTT to use for this subobject constructor or destructor. */ #define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER] #define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER] #define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER] #define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER] /* The name of the ::, std & anon namespaces. 
*/ #define global_identifier cp_global_trees[CPTI_GLOBAL_IDENTIFIER] #define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER] #define anon_identifier cp_global_trees[CPTI_ANON_IDENTIFIER] /* auto and declspec(auto) identifiers. */ #define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER] #define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER] #define init_list_identifier cp_global_trees[CPTI_INIT_LIST_IDENTIFIER] #define lang_name_c cp_global_trees[CPTI_LANG_NAME_C] #define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS] /* Exception specifiers used for throw(), noexcept(true), noexcept(false) and deferred noexcept. We rely on these being uncloned. */ #define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC] #define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC] #define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC] #define noexcept_deferred_spec cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC] /* Exception handling function declarations. */ #define terminate_fn cp_global_trees[CPTI_TERMINATE_FN] #define call_unexpected_fn cp_global_trees[CPTI_CALL_UNEXPECTED_FN] #define get_exception_ptr_fn cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN] #define begin_catch_fn cp_global_trees[CPTI_BEGIN_CATCH_FN] #define end_catch_fn cp_global_trees[CPTI_END_CATCH_FN] #define allocate_exception_fn cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN] #define free_exception_fn cp_global_trees[CPTI_FREE_EXCEPTION_FN] #define throw_fn cp_global_trees[CPTI_THROW_FN] #define rethrow_fn cp_global_trees[CPTI_RETHROW_FN] /* The type of the function-pointer argument to "__cxa_atexit" (or "std::atexit", if "__cxa_atexit" is not being used). */ #define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE] /* A pointer to `std::atexit'. */ #define atexit_node cp_global_trees[CPTI_ATEXIT] /* A pointer to `__dso_handle'. 
*/ #define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE] /* The declaration of the dynamic_cast runtime. */ #define dynamic_cast_node cp_global_trees[CPTI_DCAST] /* The type of a destructor. */ #define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE] /* The type of the vtt parameter passed to subobject constructors and destructors. */ #define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE] /* A node which matches any template argument. */ #define any_targ_node cp_global_trees[CPTI_ANY_TARG] /* Node to indicate default access. This must be distinct from the access nodes in tree.h. */ #define access_default_node null_node #include "name-lookup.h" /* Usage of TREE_LANG_FLAG_?: 0: IDENTIFIER_KIND_BIT_0 (in IDENTIFIER_NODE) NEW_EXPR_USE_GLOBAL (in NEW_EXPR). COND_EXPR_IS_VEC_DELETE (in COND_EXPR). DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR). COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR). CLEANUP_P (in TRY_BLOCK) AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR) PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF) PAREN_STRING_LITERAL (in STRING_CST) CP_DECL_THREAD_LOCAL_P (in VAR_DECL) KOENIG_LOOKUP_P (in CALL_EXPR) STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST). 
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT) STMT_EXPR_NO_SCOPE (in STMT_EXPR) BIND_EXPR_TRY_BLOCK (in BIND_EXPR) TYPENAME_IS_ENUM_P (in TYPENAME_TYPE) OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE, and OMP_TASKLOOP) BASELINK_QUALIFIED_P (in BASELINK) TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR) TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX) ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute) ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag) LAMBDA_CAPTURE_EXPLICIT_P (in a TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST) CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR) LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR) DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE) VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR) DECL_OVERRIDE_P (in FUNCTION_DECL) IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR) TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR) CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR) PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION) TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO) SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR) COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ) WILDCARD_PACK_P (in WILDCARD_DECL) BLOCK_OUTER_CURLY_BRACE_P (in BLOCK) FOLD_EXPR_MODOP_P (*_FOLD_EXPR) IF_STMT_CONSTEXPR_P (IF_STMT) TEMPLATE_TYPE_PARM_FOR_CLASS (TEMPLATE_TYPE_PARM) DECL_NAMESPACE_INLINE_P (in NAMESPACE_DECL) SWITCH_STMT_ALL_CASES_P (in SWITCH_STMT) REINTERPRET_CAST_P (in NOP_EXPR) ALIGNOF_EXPR_STD_P (in ALIGNOF_EXPR) 1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE) TI_PENDING_TEMPLATE_FLAG. TEMPLATE_PARMS_FOR_INLINE. DELETE_EXPR_USE_VEC (in DELETE_EXPR). (TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out). 
ICS_ELLIPSIS_FLAG (in _CONV) DECL_INITIALIZED_P (in VAR_DECL) TYPENAME_IS_CLASS_P (in TYPENAME_TYPE) STMT_IS_FULL_EXPR_P (in _STMT) TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR) LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR) DECL_FINAL_P (in FUNCTION_DECL) QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF) DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE) CONSTRUCTOR_NO_IMPLICIT_ZERO (in CONSTRUCTOR) TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO) PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION) OVL_USING_P (in OVERLOAD) IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR) 2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE) ICS_THIS_FLAG (in _CONV) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL) STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST) TYPENAME_IS_RESOLVING_P (in TYPE_NAME_TYPE) TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR) FNDECL_USED_AUTO (in FUNCTION_DECL) DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE) REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF) AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR) CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR) OVL_HIDDEN_P (in OVERLOAD) SWITCH_STMT_NO_BREAK_P (in SWITCH_STMT) LAMBDA_EXPR_CAPTURE_OPTIMIZED (in LAMBDA_EXPR) 3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out). ICS_BAD_FLAG (in _CONV) FN_TRY_BLOCK_P (in TRY_BLOCK) BIND_EXPR_BODY_BLOCK (in BIND_EXPR) DECL_NONTRIVIALLY_INITIALIZED_P (in VAR_DECL) CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR) DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE) CONSTRUCTOR_C99_COMPOUND_LITERAL (in CONSTRUCTOR) OVL_NESTED_P (in OVERLOAD) 4: IDENTIFIER_MARKED (IDENTIFIER_NODEs) TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR, CALL_EXPR, or FIELD_DECL). 
DECL_TINFO_P (in VAR_DECL) FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) OVL_LOOKUP_P (in OVERLOAD) LOOKUP_FOUND_P (in RECORD_TYPE, UNION_TYPE, NAMESPACE_DECL) 5: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE) DECL_VTABLE_OR_VTT_P (in VAR_DECL) FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR) CONSTRUCTOR_PLACEHOLDER_BOUNDARY (in CONSTRUCTOR) 6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE) DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL) TYPE_MARKED_P (in _TYPE) RANGE_FOR_IVDEP (in RANGE_FOR_STMT) CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR) Usage of TYPE_LANG_FLAG_?: 0: TYPE_DEPENDENT_P 1: TYPE_HAS_USER_CONSTRUCTOR. 2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE) TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE) 4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR 5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE) ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE) AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM) REFERENCE_VLA_OK (in REFERENCE_TYPE) 6: TYPE_DEPENDENT_P_VALID Usage of DECL_LANG_FLAG_?: 0: DECL_ERROR_REPORTED (in VAR_DECL). DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL) DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL) DECL_MUTABLE_P (in FIELD_DECL) DECL_DEPENDENT_P (in USING_DECL) LABEL_DECL_BREAK (in LABEL_DECL) 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) USING_DECL_TYPENAME_P (in USING_DECL) DECL_VLA_CAPTURE_P (in FIELD_DECL) DECL_ARRAY_PARAMETER_P (in PARM_DECL) LABEL_DECL_CONTINUE (in LABEL_DECL) 2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL). DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL) DECL_CONSTRAINT_VAR_P (in a PARM_DECL) TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL) DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL) LABEL_DECL_CDTOR (in LABEL_DECL) 3: DECL_IN_AGGR_P. 
4: DECL_C_BIT_FIELD (in a FIELD_DECL) DECL_ANON_UNION_VAR_P (in a VAR_DECL) DECL_SELF_REFERENCE_P (in a TYPE_DECL) DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL) 5: DECL_INTERFACE_KNOWN. 6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL). DECL_FIELD_IS_BASE (in FIELD_DECL) TYPE_DECL_ALIAS_P (in TYPE_DECL) 7: DECL_DEAD_FOR_LOCAL (in VAR_DECL). DECL_THUNK_P (in a member FUNCTION_DECL) DECL_NORMAL_CAPTURE_P (in FIELD_DECL) 8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL) Usage of language-independent fields in a language-dependent manner: TYPE_ALIAS_SET This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so forth as a substitute for the mark bits provided in `lang_type'. At present, only the six low-order bits are used. TYPE_LANG_SLOT_1 For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS. For a POINTER_TYPE (to a METHOD_TYPE), this is TYPE_PTRMEMFUNC_TYPE. For an ENUMERAL_TYPE, BOUND_TEMPLATE_TEMPLATE_PARM_TYPE, RECORD_TYPE or UNION_TYPE this is TYPE_TEMPLATE_INFO, BINFO_VIRTUALS For a binfo, this is a TREE_LIST. There is an entry for each virtual function declared either in BINFO or its direct and indirect primary bases. The BV_DELTA of each node gives the amount by which to adjust the `this' pointer when calling the function. If the method is an overridden version of a base class method, then it is assumed that, prior to adjustment, the this pointer points to an object of the base class. The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable index of the vcall offset for this entry. The BV_FN is the declaration for the virtual function itself. If BV_LOST_PRIMARY is set, it means that this entry is for a lost primary virtual base and can be left null in the vtable. BINFO_VTABLE This is an expression with POINTER_TYPE that gives the value to which the vptr should be initialized. Use get_vtbl_decl_for_binfo to extract the VAR_DECL for the complete vtable. DECL_VINDEX This field is NULL for a non-virtual function. 
For a virtual function, it is eventually set to an INTEGER_CST indicating the index in the vtable at which this function can be found. When a virtual function is declared, but before it is known what function is overridden, this field is the error_mark_node. Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is the virtual function this one overrides, and whose TREE_CHAIN is the old DECL_VINDEX. */ /* Language-specific tree checkers. */ #define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL) #define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL) #define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \ || TREE_CODE (NODE) == FUNCTION_DECL) #define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL) #define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \ TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define THUNK_FUNCTION_CHECK(NODE) __extension__ \ ({ __typeof (NODE) const __t = (NODE); \ if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \ || !__t->decl_common.lang_specific->u.fn.thunk_p) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \ __t; }) #else #define THUNK_FUNCTION_CHECK(NODE) (NODE) #endif /* Language-dependent contents of an identifier. */ struct GTY(()) lang_identifier { struct c_common_identifier c_common; cxx_binding *bindings; }; /* Return a typed pointer version of T if it designates a C++ front-end identifier. 
*/
inline lang_identifier*
identifier_p (tree t)
{
  if (TREE_CODE (t) == IDENTIFIER_NODE)
    return (lang_identifier*) t;
  return NULL;
}

#define LANG_IDENTIFIER_CAST(NODE) \
  ((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))

/* A template parameter index node: records the parameter's position
   (index), its nesting level and original level, and the associated
   parameter DECL.  */
struct GTY(()) template_parm_index {
  struct tree_common common;
  int index;
  int level;
  int orig_level;
  tree decl;
};

/* A pointer-to-member constant: the member it designates.  */
struct GTY(()) ptrmem_cst {
  struct tree_common common;
  tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;

#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))

#define BIND_EXPR_TRY_BLOCK(NODE) \
  TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))

/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
  TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))

#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
   || LAMBDA_FUNCTION_P (NODE))

#define STATEMENT_LIST_NO_SCOPE(NODE) \
  TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
  TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))

/* Mark the outer curly brace BLOCK. */
#define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE))

/* Nonzero if this statement should be considered a full-expression,
   i.e., if temporaries created during this statement should have their
   destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))

/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
  TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))

/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
  TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))

#define COND_EXPR_IS_VEC_DELETE(NODE) \
  TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE))

/* Nonzero if this NOP_EXPR is a reinterpret_cast. Such conversions are not constexprs. Other NOP_EXPRs are.
*/ #define REINTERPRET_CAST_P(NODE) \ TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE)) /* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual sense of `same'. */ #define same_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_STRICT) /* Returns nonzero iff NODE is a declaration for the global function `main'. */ #define DECL_MAIN_P(NODE) \ (DECL_EXTERN_C_FUNCTION_P (NODE) \ && DECL_NAME (NODE) != NULL_TREE \ && MAIN_NAME_P (DECL_NAME (NODE)) \ && flag_hosted) /* Lookup walker marking. */ #define LOOKUP_SEEN_P(NODE) TREE_VISITED(NODE) #define LOOKUP_FOUND_P(NODE) \ TREE_LANG_FLAG_4 (TREE_CHECK3(NODE,RECORD_TYPE,UNION_TYPE,NAMESPACE_DECL)) /* These two accessors should only be used by OVL manipulators. Other users should use iterators and convenience functions. */ #define OVL_FUNCTION(NODE) \ (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function) #define OVL_CHAIN(NODE) \ (((struct tree_overload*)OVERLOAD_CHECK (NODE))->common.chain) /* If set, this was imported in a using declaration. */ #define OVL_USING_P(NODE) TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE)) /* If set, this overload is a hidden decl. */ #define OVL_HIDDEN_P(NODE) TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE)) /* If set, this overload contains a nested overload. */ #define OVL_NESTED_P(NODE) TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE)) /* If set, this overload was constructed during lookup. */ #define OVL_LOOKUP_P(NODE) TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE)) /* If set, this is a persistant lookup. */ #define OVL_USED_P(NODE) TREE_USED (OVERLOAD_CHECK (NODE)) /* The first decl of an overload. */ #define OVL_FIRST(NODE) ovl_first (NODE) /* The name of the overload set. */ #define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE)) /* Whether this is a set of overloaded functions. TEMPLATE_DECLS are always wrapped in an OVERLOAD, so we don't need to check them here. */ #define OVL_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD) /* Whether this is a single member overload. 
*/
#define OVL_SINGLE_P(NODE) \
  (TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE))

/* OVL_HIDDEN_P nodes come first, then OVL_USING_P nodes, then regular
   fns.  */

struct GTY(()) tree_overload {
  struct tree_common common;
  tree function;
};

/* Iterator for a 1 dimensional overload.  Permits iterating over the
   outer level of a 2-d overload when explicitly enabled.  */

class ovl_iterator
{
  tree ovl;
  const bool allow_inner; /* Only used when checking.  */

 public:
  explicit ovl_iterator (tree o, bool allow = false)
    : ovl (o), allow_inner (allow)
  {
  }

 private:
  /* Do not duplicate.  */
  ovl_iterator &operator= (const ovl_iterator &);
  ovl_iterator (const ovl_iterator &);

 public:
  /* True while there are more members to visit.  */
  operator bool () const
  {
    return ovl;
  }
  /* Step to the next member of the overload chain.  */
  ovl_iterator &operator++ ()
  {
    ovl = TREE_CODE (ovl) != OVERLOAD ? NULL_TREE : OVL_CHAIN (ovl);
    return *this;
  }
  /* The decl the iterator currently points at.  */
  tree operator* () const
  {
    tree fn = TREE_CODE (ovl) != OVERLOAD ? ovl : OVL_FUNCTION (ovl);

    /* Check this is not an unexpected 2-dimensional overload.  */
    gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD);

    return fn;
  }

 public:
  /* Whether this overload was introduced by a using decl.  */
  bool using_p () const
  {
    return TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl);
  }
  /* Whether this overload is a hidden decl.  */
  bool hidden_p () const
  {
    return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl);
  }

 public:
  /* Remove (or reveal) the node this iterator points at from the
     overload list headed by HEAD, returning the new head.  These
     forward to the static workers below.  */
  tree remove_node (tree head)
  {
    return remove_node (head, ovl);
  }
  tree reveal_node (tree head)
  {
    return reveal_node (head, ovl);
  }

 protected:
  /* If we have a nested overload, point at the inner overload and
     return the next link on the outer one.  */
  tree maybe_push ()
  {
    tree r = NULL_TREE;

    if (ovl && TREE_CODE (ovl) == OVERLOAD && OVL_NESTED_P (ovl))
      {
	r = OVL_CHAIN (ovl);
	ovl = OVL_FUNCTION (ovl);
      }
    return r;
  }
  /* Restore an outer nested overload.  */
  void pop (tree outer)
  {
    gcc_checking_assert (!ovl);
    ovl = outer;
  }

 private:
  /* We make these static functions to avoid the address of the
     iterator escaping the local context.  */
  static tree remove_node (tree head, tree node);
  static tree reveal_node (tree ovl, tree node);
};

/* Iterator over a (potentially) 2 dimensional overload, which is
   produced by name lookup.  */

class lkp_iterator : public ovl_iterator
{
  typedef ovl_iterator parent;

  /* The remainder of the outer level, while we traverse an inner
     (nested) overload; NULL_TREE otherwise.  */
  tree outer;

 public:
  explicit lkp_iterator (tree o)
    : parent (o, true), outer (maybe_push ())
  {
  }

 public:
  /* Advance, descending into and climbing back out of nested
     overloads as needed.  */
  lkp_iterator &operator++ ()
  {
    bool repush = !outer;

    if (!parent::operator++ () && !repush)
      {
	pop (outer);
	repush = true;
      }

    if (repush)
      outer = maybe_push ();

    return *this;
  }
};

/* hash traits for declarations.  Hashes potential overload sets via
   DECL_NAME.  */

struct named_decl_hash : ggc_remove <tree>
{
  typedef tree value_type; /* A DECL or OVERLOAD  */
  typedef tree compare_type; /* An identifier.  */

  inline static hashval_t hash (const value_type decl);
  inline static bool equal (const value_type existing, compare_type candidate);

  static inline void mark_empty (value_type &p) {p = NULL_TREE;}
  static inline bool is_empty (value_type p) {return !p;}

  /* Nothing is deletable.  Everything is insertable.  */
  static bool is_deleted (value_type) { return false; }
  static void mark_deleted (value_type) { gcc_unreachable (); }
};

struct GTY(()) tree_template_decl {
  struct tree_decl_common common;
  tree arguments;
  tree result;
};

/* Returns true iff NODE is a BASELINK.  */
#define BASELINK_P(NODE) \
  (TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
   BASELINK_FUNCTIONS.  */
#define BASELINK_BINFO(NODE) \
  (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
   a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR.  */
#define BASELINK_FUNCTIONS(NODE) \
  (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* If T is a BASELINK, grab the functions, otherwise just T, which is
   expected to already be a (list of) functions.  */
#define MAYBE_BASELINK_FUNCTIONS(T) \
  (BASELINK_P (T) ?
BASELINK_FUNCTIONS (T) : T) /* The BINFO in which the search for the functions indicated by this baselink began. This base is used to determine the accessibility of functions selected by overload resolution. */ #define BASELINK_ACCESS_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo) /* For a type-conversion operator, the BASELINK_OPTYPE indicates the type to which the conversion should occur. This value is important if the BASELINK_FUNCTIONS include a template conversion operator -- the BASELINK_OPTYPE can be used to determine what type the user requested. */ #define BASELINK_OPTYPE(NODE) \ (TREE_CHAIN (BASELINK_CHECK (NODE))) /* Nonzero if this baselink was from a qualified lookup. */ #define BASELINK_QUALIFIED_P(NODE) \ TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE)) struct GTY(()) tree_baselink { struct tree_common common; tree binfo; tree functions; tree access_binfo; }; /* The different kinds of ids that we encounter. */ enum cp_id_kind { /* Not an id at all. */ CP_ID_KIND_NONE, /* An unqualified-id that is not a template-id. */ CP_ID_KIND_UNQUALIFIED, /* An unqualified-id that is a dependent name. */ CP_ID_KIND_UNQUALIFIED_DEPENDENT, /* An unqualified template-id. */ CP_ID_KIND_TEMPLATE_ID, /* A qualified-id. */ CP_ID_KIND_QUALIFIED }; /* The various kinds of C++0x warnings we encounter. 
*/ enum cpp0x_warn_str { /* extended initializer lists */ CPP0X_INITIALIZER_LISTS, /* explicit conversion operators */ CPP0X_EXPLICIT_CONVERSION, /* variadic templates */ CPP0X_VARIADIC_TEMPLATES, /* lambda expressions */ CPP0X_LAMBDA_EXPR, /* C++0x auto */ CPP0X_AUTO, /* scoped enums */ CPP0X_SCOPED_ENUMS, /* defaulted and deleted functions */ CPP0X_DEFAULTED_DELETED, /* inline namespaces */ CPP0X_INLINE_NAMESPACES, /* override controls, override/final */ CPP0X_OVERRIDE_CONTROLS, /* non-static data member initializers */ CPP0X_NSDMI, /* user defined literals */ CPP0X_USER_DEFINED_LITERALS, /* delegating constructors */ CPP0X_DELEGATING_CTORS, /* inheriting constructors */ CPP0X_INHERITING_CTORS, /* C++11 attributes */ CPP0X_ATTRIBUTES, /* ref-qualified member functions */ CPP0X_REF_QUALIFIER }; /* The various kinds of operation used by composite_pointer_type. */ enum composite_pointer_operation { /* comparison */ CPO_COMPARISON, /* conversion */ CPO_CONVERSION, /* conditional expression */ CPO_CONDITIONAL_EXPR }; /* Possible cases of expression list used by build_x_compound_expr_from_list. */ enum expr_list_kind { ELK_INIT, /* initializer */ ELK_MEM_INIT, /* member initializer */ ELK_FUNC_CAST /* functional cast */ }; /* Possible cases of implicit bad rhs conversions. */ enum impl_conv_rhs { ICR_DEFAULT_ARGUMENT, /* default argument */ ICR_CONVERTING, /* converting */ ICR_INIT, /* initialization */ ICR_ARGPASS, /* argument passing */ ICR_RETURN, /* return */ ICR_ASSIGN /* assignment */ }; /* Possible cases of implicit or explicit bad conversions to void. 
*/ enum impl_conv_void { ICV_CAST, /* (explicit) conversion to void */ ICV_SECOND_OF_COND, /* second operand of conditional expression */ ICV_THIRD_OF_COND, /* third operand of conditional expression */ ICV_RIGHT_OF_COMMA, /* right operand of comma operator */ ICV_LEFT_OF_COMMA, /* left operand of comma operator */ ICV_STATEMENT, /* statement */ ICV_THIRD_IN_FOR /* for increment expression */ }; /* Possible invalid uses of an abstract class that might not have a specific associated declaration. */ enum GTY(()) abstract_class_use { ACU_UNKNOWN, /* unknown or decl provided */ ACU_CAST, /* cast to abstract class */ ACU_NEW, /* new-expression of abstract class */ ACU_THROW, /* throw-expression of abstract class */ ACU_CATCH, /* catch-parameter of abstract class */ ACU_ARRAY, /* array of abstract class */ ACU_RETURN, /* return type of abstract class */ ACU_PARM /* parameter type of abstract class */ }; /* Macros for access to language-specific slots in an identifier. */ /* The IDENTIFIER_BINDING is the innermost cxx_binding for the identifier. Its PREVIOUS is the next outermost binding. Each VALUE field is a DECL for the associated declaration. Thus, name lookup consists simply of pulling off the node at the front of the list (modulo oddities for looking up the names of types, and such.) You can use SCOPE field to determine the scope that bound the name. */ #define IDENTIFIER_BINDING(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->bindings) /* TREE_TYPE only indicates on local and class scope the current type. For namespace scope, the presence of a type in any namespace is indicated with global_type_node, and the real type behind must be found through lookup. */ #define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE) #define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE) #define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE)) #define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0) /* Kinds of identifiers. 
Values are carefully chosen. */ enum cp_identifier_kind { cik_normal = 0, /* Not a special identifier. */ cik_keyword = 1, /* A keyword. */ cik_ctor = 2, /* Constructor (in-chg, complete or base). */ cik_dtor = 3, /* Destructor (in-chg, deleting, complete or base). */ cik_simple_op = 4, /* Non-assignment operator name. */ cik_assign_op = 5, /* An assignment operator name. */ cik_conv_op = 6, /* Conversion operator name. */ cik_reserved_for_udlit = 7, /* Not yet in use */ cik_max }; /* Kind bits. */ #define IDENTIFIER_KIND_BIT_0(NODE) \ TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE)) #define IDENTIFIER_KIND_BIT_1(NODE) \ TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE)) #define IDENTIFIER_KIND_BIT_2(NODE) \ TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE)) /* Used by various search routines. */ #define IDENTIFIER_MARKED(NODE) \ TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE)) /* Nonzero if this identifier is used as a virtual function name somewhere (optimizes searches). */ #define IDENTIFIER_VIRTUAL_P(NODE) \ TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE)) /* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague linkage which the prelinker has assigned to this translation unit. */ #define IDENTIFIER_REPO_CHOSEN(NAME) \ (TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (NAME))) /* True if this identifier is a reserved word. C_RID_CODE (node) is then the RID_* value of the keyword. Value 1. */ #define IDENTIFIER_KEYWORD_P(NODE) \ ((!IDENTIFIER_KIND_BIT_2 (NODE)) \ & (!IDENTIFIER_KIND_BIT_1 (NODE)) \ & IDENTIFIER_KIND_BIT_0 (NODE)) /* True if this identifier is the name of a constructor or destructor. Value 2 or 3. */ #define IDENTIFIER_CDTOR_P(NODE) \ ((!IDENTIFIER_KIND_BIT_2 (NODE)) \ & IDENTIFIER_KIND_BIT_1 (NODE)) /* True if this identifier is the name of a constructor. Value 2. */ #define IDENTIFIER_CTOR_P(NODE) \ (IDENTIFIER_CDTOR_P(NODE) \ & (!IDENTIFIER_KIND_BIT_0 (NODE))) /* True if this identifier is the name of a destructor. Value 3. 
*/ #define IDENTIFIER_DTOR_P(NODE) \ (IDENTIFIER_CDTOR_P(NODE) \ & IDENTIFIER_KIND_BIT_0 (NODE)) /* True if this identifier is for any operator name (including conversions). Value 4, 5, 6 or 7. */ #define IDENTIFIER_ANY_OP_P(NODE) \ (IDENTIFIER_KIND_BIT_2 (NODE)) /* True if this identifier is for an overloaded operator. Values 4, 5. */ #define IDENTIFIER_OVL_OP_P(NODE) \ (IDENTIFIER_ANY_OP_P (NODE) \ & (!IDENTIFIER_KIND_BIT_1 (NODE))) /* True if this identifier is for any assignment. Values 5. */ #define IDENTIFIER_ASSIGN_OP_P(NODE) \ (IDENTIFIER_OVL_OP_P (NODE) \ & IDENTIFIER_KIND_BIT_0 (NODE)) /* True if this identifier is the name of a type-conversion operator. Value 7. */ #define IDENTIFIER_CONV_OP_P(NODE) \ (IDENTIFIER_ANY_OP_P (NODE) \ & IDENTIFIER_KIND_BIT_1 (NODE) \ & (!IDENTIFIER_KIND_BIT_0 (NODE))) /* True if this identifier is a new or delete operator. */ #define IDENTIFIER_NEWDEL_OP_P(NODE) \ (IDENTIFIER_OVL_OP_P (NODE) \ && IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC) /* True if this identifier is a new operator. */ #define IDENTIFIER_NEW_OP_P(NODE) \ (IDENTIFIER_OVL_OP_P (NODE) \ && (IDENTIFIER_OVL_OP_FLAGS (NODE) \ & (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) == OVL_OP_FLAG_ALLOC) /* Access a C++-specific index for identifier NODE. Used to optimize operator mappings etc. */ #define IDENTIFIER_CP_INDEX(NODE) \ (IDENTIFIER_NODE_CHECK(NODE)->base.u.bits.address_space) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly) /* The tokens stored in the default argument. 
*/ #define DEFARG_TOKENS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens) #define DEFARG_INSTANTIATIONS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations) struct GTY (()) tree_default_arg { struct tree_common common; struct cp_token_cache *tokens; vec<tree, va_gc> *instantiations; }; #define DEFERRED_NOEXCEPT_PATTERN(NODE) \ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern) #define DEFERRED_NOEXCEPT_ARGS(NODE) \ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args) #define DEFERRED_NOEXCEPT_SPEC_P(NODE) \ ((NODE) && (TREE_PURPOSE (NODE)) \ && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT)) #define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \ (DEFERRED_NOEXCEPT_SPEC_P (NODE) \ && DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE) struct GTY (()) tree_deferred_noexcept { struct tree_base base; tree pattern; tree args; }; /* The condition associated with the static assertion. This must be an integral constant expression. */ #define STATIC_ASSERT_CONDITION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition) /* The message associated with the static assertion. This must be a string constant, which will be emitted as an error message when the static assert condition is false. */ #define STATIC_ASSERT_MESSAGE(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message) /* Source location information for a static assertion. */ #define STATIC_ASSERT_SOURCE_LOCATION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location) struct GTY (()) tree_static_assert { struct tree_common common; tree condition; tree message; location_t location; }; struct GTY (()) tree_argument_pack_select { struct tree_common common; tree argument_pack; int index; }; /* The different kinds of traits that we encounter. 
*/ enum cp_trait_kind { CPTK_BASES, CPTK_DIRECT_BASES, CPTK_HAS_NOTHROW_ASSIGN, CPTK_HAS_NOTHROW_CONSTRUCTOR, CPTK_HAS_NOTHROW_COPY, CPTK_HAS_TRIVIAL_ASSIGN, CPTK_HAS_TRIVIAL_CONSTRUCTOR, CPTK_HAS_TRIVIAL_COPY, CPTK_HAS_TRIVIAL_DESTRUCTOR, CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS, CPTK_HAS_VIRTUAL_DESTRUCTOR, CPTK_IS_ABSTRACT, CPTK_IS_AGGREGATE, CPTK_IS_BASE_OF, CPTK_IS_CLASS, CPTK_IS_EMPTY, CPTK_IS_ENUM, CPTK_IS_FINAL, CPTK_IS_LITERAL_TYPE, CPTK_IS_POD, CPTK_IS_POLYMORPHIC, CPTK_IS_SAME_AS, CPTK_IS_STD_LAYOUT, CPTK_IS_TRIVIAL, CPTK_IS_TRIVIALLY_ASSIGNABLE, CPTK_IS_TRIVIALLY_CONSTRUCTIBLE, CPTK_IS_TRIVIALLY_COPYABLE, CPTK_IS_UNION, CPTK_UNDERLYING_TYPE, CPTK_IS_ASSIGNABLE, CPTK_IS_CONSTRUCTIBLE }; /* The types that we are processing. */ #define TRAIT_EXPR_TYPE1(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1) #define TRAIT_EXPR_TYPE2(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2) /* The specific trait that we are processing. */ #define TRAIT_EXPR_KIND(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind) struct GTY (()) tree_trait_expr { struct tree_common common; tree type1; tree type2; enum cp_trait_kind kind; }; /* Based off of TYPE_UNNAMED_P. */ #define LAMBDA_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE)) /* Test if FUNCTION_DECL is a lambda function. */ #define LAMBDA_FUNCTION_P(FNDECL) \ (DECL_DECLARES_FUNCTION_P (FNDECL) \ && DECL_OVERLOADED_OPERATOR_P (FNDECL) \ && DECL_OVERLOADED_OPERATOR_IS (FNDECL, CALL_EXPR) \ && LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL))) enum cp_lambda_default_capture_mode_type { CPLD_NONE, CPLD_COPY, CPLD_REFERENCE }; /* The method of default capture, if any. */ #define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode) /* The capture-list, including `this'. 
Each capture is stored as a FIELD_DECL * so that the name, type, and field are all together, whether or not it has * been added to the lambda's class type. TREE_LIST: TREE_PURPOSE: The FIELD_DECL for this capture. TREE_VALUE: The initializer. This is part of a GNU extension. */ #define LAMBDA_EXPR_CAPTURE_LIST(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list) /* During parsing of the lambda-introducer, the node in the capture-list that holds the 'this' capture. During parsing of the body, the capture proxy for that node. */ #define LAMBDA_EXPR_THIS_CAPTURE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture) /* Predicate tracking whether `this' is in the effective capture set. */ #define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \ LAMBDA_EXPR_THIS_CAPTURE(NODE) /* Predicate tracking whether the lambda was declared 'mutable'. */ #define LAMBDA_EXPR_MUTABLE_P(NODE) \ TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE)) /* True iff uses of a const variable capture were optimized away. */ #define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \ TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE)) /* True if this TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST is for an explicit capture. */ #define LAMBDA_CAPTURE_EXPLICIT_P(NODE) \ TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) /* The source location of the lambda. */ #define LAMBDA_EXPR_LOCATION(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus) /* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL, FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */ #define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope) /* If EXTRA_SCOPE, this is the number of the lambda within that scope. 
*/ #define LAMBDA_EXPR_DISCRIMINATOR(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator) /* During parsing of the lambda, a vector of capture proxies which need to be pushed once we're done processing a nested lambda. */ #define LAMBDA_EXPR_PENDING_PROXIES(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies) /* The closure type of the lambda, which is also the type of the LAMBDA_EXPR. */ #define LAMBDA_EXPR_CLOSURE(NODE) \ (TREE_TYPE (LAMBDA_EXPR_CHECK (NODE))) struct GTY (()) tree_lambda_expr { struct tree_typed typed; tree capture_list; tree this_capture; tree extra_scope; vec<tree, va_gc> *pending_proxies; location_t locus; enum cp_lambda_default_capture_mode_type default_capture_mode; int discriminator; }; /* A (typedef,context,usage location) triplet. It represents a typedef used through a context at a given source location. e.g. struct foo { typedef int myint; }; struct bar { foo::myint v; // #1<-- this location. }; In bar, the triplet will be (myint, foo, #1). */ struct GTY(()) qualified_typedef_usage_s { tree typedef_decl; tree context; location_t locus; }; typedef struct qualified_typedef_usage_s qualified_typedef_usage_t; /* Non-zero if this template specialization has access violations that should be rechecked when the function is instantiated outside argument deduction. */ #define TINFO_HAS_ACCESS_ERRORS(NODE) \ (TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE))) #define FNDECL_HAS_ACCESS_ERRORS(NODE) \ (TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE))) /* Non-zero if this variable template specialization was specified using a template-id, so it's a partial or full specialization and not a definition of the member template of a particular class specialization. 
*/
#define TINFO_USED_TEMPLATE_ID(NODE) \
  (TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))

struct GTY(()) tree_template_info {
  struct tree_common common;
  vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};

// Constraint information for a C++ declaration. Constraint information is
// comprised of:
//
// - a constraint expression introduced by the template header
// - a constraint expression introduced by a function declarator
// - the associated constraints, which are the conjunction of those,
//   and used for declaration matching
//
// The template and declarator requirements are kept to support pretty
// printing constrained declarations.
struct GTY(()) tree_constraint_info {
  struct tree_base base;
  tree template_reqs;
  tree declarator_reqs;
  tree associated_constr;
};

// Require that pointer P is non-null before returning.
template<typename T>
inline T*
check_nonnull (T* p)
{
  gcc_assert (p);
  return p;
}

// If T is non-null and represents constraint info, return it as a
// tree_constraint_info pointer; otherwise return NULL.
inline tree_constraint_info *
check_constraint_info (tree t)
{
  if (t && TREE_CODE (t) == CONSTRAINT_INFO)
    return (tree_constraint_info *)t;
  return NULL;
}

// Access the expression describing the template constraints. This may be
// null if no constraints were introduced in the template parameter list,
// a requirements clause after the template parameter list, or constraints
// through a constrained-type-specifier.
#define CI_TEMPLATE_REQS(NODE) \
  check_constraint_info (check_nonnull(NODE))->template_reqs

// Access the expression describing the trailing constraints. This is non-null
// for any implicit instantiation of a constrained declaration. For a
// templated declaration it is non-null only when a trailing requires-clause
// was specified.
#define CI_DECLARATOR_REQS(NODE) \
  check_constraint_info (check_nonnull(NODE))->declarator_reqs

// The computed associated constraint expression for a declaration.
#define CI_ASSOCIATED_CONSTRAINTS(NODE) \ check_constraint_info (check_nonnull(NODE))->associated_constr // Access the logical constraints on the template parameters introduced // at a given template parameter list level indicated by NODE. #define TEMPLATE_PARMS_CONSTRAINTS(NODE) \ TREE_TYPE (TREE_LIST_CHECK (NODE)) // Access the logical constraints on the template parameter declaration // indicated by NODE. #define TEMPLATE_PARM_CONSTRAINTS(NODE) \ TREE_TYPE (TREE_LIST_CHECK (NODE)) /* Non-zero if the noexcept is present in a compound requirement. */ #define COMPOUND_REQ_NOEXCEPT_P(NODE) \ TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ)) /* The constraints on an 'auto' placeholder type, used in an argument deduction constraint. */ #define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \ DECL_SIZE_UNIT (TYPE_NAME (NODE)) /* The expression evaluated by the predicate constraint. */ #define PRED_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, PRED_CONSTR), 0) /* The concept of a concept check. */ #define CHECK_CONSTR_CONCEPT(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0) /* The template arguments of a concept check. */ #define CHECK_CONSTR_ARGS(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1) /* The expression validated by the predicate constraint. */ #define EXPR_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, EXPR_CONSTR), 0) /* The type validated by the predicate constraint. */ #define TYPE_CONSTR_TYPE(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, TYPE_CONSTR), 0) /* In an implicit conversion constraint, the source expression. */ #define ICONV_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 0) /* In an implicit conversion constraint, the target type. */ #define ICONV_CONSTR_TYPE(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 1) /* In an argument deduction constraint, the source expression. 
*/ #define DEDUCT_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 0) /* In an argument deduction constraint, the target type pattern. */ #define DEDUCT_CONSTR_PATTERN(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 1) /* In an argument deduction constraint, the list of placeholder nodes. */ #define DEDUCT_CONSTR_PLACEHOLDER(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 2) /* The expression of an exception constraint. */ #define EXCEPT_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, EXCEPT_CONSTR), 0) /* In a parameterized constraint, the local parameters. */ #define PARM_CONSTR_PARMS(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 0) /* In a parameterized constraint, the operand. */ #define PARM_CONSTR_OPERAND(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 1) /* Whether a PARM_DECL represents a local parameter in a requires-expression. */ #define CONSTRAINT_VAR_P(NODE) \ DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL)) /* The concept constraining this constrained template-parameter. */ #define CONSTRAINED_PARM_CONCEPT(NODE) \ DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE)) /* Any extra template arguments specified for a constrained template-parameter. */ #define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \ DECL_SIZE (TYPE_DECL_CHECK (NODE)) /* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a prototype for the constrained parameter in finish_shorthand_constraint, attached for convenience. */ #define CONSTRAINED_PARM_PROTOTYPE(NODE) \ DECL_INITIAL (TYPE_DECL_CHECK (NODE)) enum cp_tree_node_structure_enum { TS_CP_GENERIC, TS_CP_IDENTIFIER, TS_CP_TPI, TS_CP_PTRMEM, TS_CP_OVERLOAD, TS_CP_BASELINK, TS_CP_TEMPLATE_DECL, TS_CP_DEFAULT_ARG, TS_CP_DEFERRED_NOEXCEPT, TS_CP_STATIC_ASSERT, TS_CP_ARGUMENT_PACK_SELECT, TS_CP_TRAIT_EXPR, TS_CP_LAMBDA_EXPR, TS_CP_TEMPLATE_INFO, TS_CP_CONSTRAINT_INFO, TS_CP_USERDEF_LITERAL }; /* The resulting tree type. 
*/ union GTY((desc ("cp_tree_node_structure (&%h)"), chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { union tree_node GTY ((tag ("TS_CP_GENERIC"), desc ("tree_node_structure (&%h)"))) generic; struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi; struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem; struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload; struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink; struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl; struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg; struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept; struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier; struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT"))) static_assertion; struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT"))) argument_pack_select; struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR"))) trait_expression; struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR"))) lambda_expression; struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO"))) template_info; struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO"))) constraint_info; struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL"))) userdef_literal; }; /* Global state. */ struct GTY(()) saved_scope { vec<cxx_saved_binding, va_gc> *old_bindings; tree old_namespace; vec<tree, va_gc> *decl_ns_list; tree class_name; tree class_type; tree access_specifier; tree function_decl; vec<tree, va_gc> *lang_base; tree lang_name; tree template_parms; cp_binding_level *x_previous_class_level; tree x_saved_tree; /* Only used for uses of this in trailing return type. 
*/ tree x_current_class_ptr; tree x_current_class_ref; int x_processing_template_decl; int x_processing_specialization; BOOL_BITFIELD x_processing_explicit_instantiation : 1; BOOL_BITFIELD need_pop_function_context : 1; /* Nonzero if we are parsing the discarded statement of a constexpr if-statement. */ BOOL_BITFIELD discarded_stmt : 1; int unevaluated_operand; int inhibit_evaluation_warnings; int noexcept_operand; /* If non-zero, implicit "omp declare target" attribute is added into the attribute lists. */ int omp_declare_target_attribute; struct stmt_tree_s x_stmt_tree; cp_binding_level *class_bindings; cp_binding_level *bindings; hash_map<tree, tree> *GTY((skip)) x_local_specializations; struct saved_scope *prev; }; extern GTY(()) struct saved_scope *scope_chain; /* The current open namespace. */ #define current_namespace scope_chain->old_namespace /* The stack for namespaces of current declarations. */ #define decl_namespace_list scope_chain->decl_ns_list /* IDENTIFIER_NODE: name of current class */ #define current_class_name scope_chain->class_name /* _TYPE: the type of the current class */ #define current_class_type scope_chain->class_type /* When parsing a class definition, the access specifier most recently given by the user, or, if no access specifier was given, the default value appropriate for the kind of class (i.e., struct, class, or union). */ #define current_access_specifier scope_chain->access_specifier /* Pointer to the top of the language name stack. */ #define current_lang_base scope_chain->lang_base #define current_lang_name scope_chain->lang_name /* When parsing a template declaration, a TREE_LIST represents the active template parameters. Each node in the list represents one level of template parameters. The innermost level is first in the list. The depth of each level is stored as an INTEGER_CST in the TREE_PURPOSE of each node. The parameters for that level are stored in the TREE_VALUE. 
*/
#define current_template_parms scope_chain->template_parms

#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation \
  scope_chain->x_processing_explicit_instantiation

#define in_discarded_stmt scope_chain->discarded_stmt

/* RAII sentinel to handle clearing processing_template_decl and restoring
   it when done.  */

struct processing_template_decl_sentinel
{
  int saved;
  processing_template_decl_sentinel (bool reset = true)
    : saved (processing_template_decl)
  {
    if (reset)
      processing_template_decl = 0;
  }
  ~processing_template_decl_sentinel()
  {
    processing_template_decl = saved;
  }
};

/* RAII sentinel to disable certain warnings during template substitution
   and elsewhere.  */

struct warning_sentinel
{
  int &flag;
  int val;
  warning_sentinel(int& flag, bool suppress=true)
    : flag(flag), val(flag)
  {
    if (suppress)
      flag = 0;
  }
  ~warning_sentinel() { flag = val; }
};

/* RAII sentinel that saves the value of a variable, optionally overrides
   it right away, and restores its value when the sentinel is destructed.  */

template <typename T>
class temp_override
{
  T& overridden_variable;
  T saved_value;
public:
  temp_override(T& var) : overridden_variable (var), saved_value (var) {}
  temp_override(T& var, T overrider)
    : overridden_variable (var), saved_value (var)
  {
    overridden_variable = overrider;
  }
  ~temp_override() { overridden_variable = saved_value; }
};

/* The cached class binding level, from the most recently exited
   class, or NULL if none.  */

#define previous_class_level scope_chain->x_previous_class_level

/* A map from local variable declarations in the body of the template
   presently being instantiated to the corresponding instantiated
   local variables.  */

#define local_specializations scope_chain->x_local_specializations

/* Nonzero if we are parsing the operand of a noexcept operator.
*/ #define cp_noexcept_operand scope_chain->noexcept_operand /* A list of private types mentioned, for deferred access checking. */ struct GTY((for_user)) cxx_int_tree_map { unsigned int uid; tree to; }; struct cxx_int_tree_map_hasher : ggc_ptr_hash<cxx_int_tree_map> { static hashval_t hash (cxx_int_tree_map *); static bool equal (cxx_int_tree_map *, cxx_int_tree_map *); }; struct named_label_entry; /* Defined in decl.c. */ struct named_label_hash : ggc_remove <named_label_entry *> { typedef named_label_entry *value_type; typedef tree compare_type; /* An identifier. */ inline static hashval_t hash (value_type); inline static bool equal (const value_type, compare_type); inline static void mark_empty (value_type &p) {p = NULL;} inline static bool is_empty (value_type p) {return !p;} /* Nothing is deletable. Everything is insertable. */ inline static bool is_deleted (value_type) { return false; } inline static void mark_deleted (value_type) { gcc_unreachable (); } }; /* Global state pertinent to the current function. */ struct GTY(()) language_function { struct c_language_function base; tree x_cdtor_label; tree x_current_class_ptr; tree x_current_class_ref; tree x_eh_spec_block; tree x_in_charge_parm; tree x_vtt_parm; tree x_return_value; tree x_auto_return_pattern; BOOL_BITFIELD returns_value : 1; BOOL_BITFIELD returns_null : 1; BOOL_BITFIELD returns_abnormally : 1; BOOL_BITFIELD infinite_loop: 1; BOOL_BITFIELD x_in_function_try_handler : 1; BOOL_BITFIELD x_in_base_initializer : 1; /* True if this function can throw an exception. */ BOOL_BITFIELD can_throw : 1; BOOL_BITFIELD invalid_constexpr : 1; hash_table<named_label_hash> *x_named_labels; cp_binding_level *bindings; vec<tree, va_gc> *x_local_names; /* Tracking possibly infinite loops. This is a vec<tree> only because vec<bool> doesn't work with gtype. */ vec<tree, va_gc> *infinite_loops; hash_table<cxx_int_tree_map_hasher> *extern_decl_map; }; /* The current C++-specific per-function global variables. 
*/ #define cp_function_chain (cfun->language) /* In a constructor destructor, the point at which all derived class destroying/construction has been done. I.e., just before a constructor returns, or before any base class destroying will be done in a destructor. */ #define cdtor_label cp_function_chain->x_cdtor_label /* When we're processing a member function, current_class_ptr is the PARM_DECL for the `this' pointer. The current_class_ref is an expression for `*this'. */ #define current_class_ptr \ (*(cfun && cp_function_chain \ ? &cp_function_chain->x_current_class_ptr \ : &scope_chain->x_current_class_ptr)) #define current_class_ref \ (*(cfun && cp_function_chain \ ? &cp_function_chain->x_current_class_ref \ : &scope_chain->x_current_class_ref)) /* The EH_SPEC_BLOCK for the exception-specifiers for the current function, if any. */ #define current_eh_spec_block cp_function_chain->x_eh_spec_block /* The `__in_chrg' parameter for the current function. Only used for constructors and destructors. */ #define current_in_charge_parm cp_function_chain->x_in_charge_parm /* The `__vtt_parm' parameter for the current function. Only used for constructors and destructors. */ #define current_vtt_parm cp_function_chain->x_vtt_parm /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ #define current_function_returns_value cp_function_chain->returns_value /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ #define current_function_returns_null cp_function_chain->returns_null /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ #define current_function_returns_abnormally \ cp_function_chain->returns_abnormally /* Set to 0 at beginning of a function definition, set to 1 if we see an obvious infinite loop. This can have false positives and false negatives, so it should only be used as a heuristic. 
*/ #define current_function_infinite_loop cp_function_chain->infinite_loop /* Nonzero if we are processing a base initializer. Zero elsewhere. */ #define in_base_initializer cp_function_chain->x_in_base_initializer #define in_function_try_handler cp_function_chain->x_in_function_try_handler /* Expression always returned from function, or error_mark_node otherwise, for use by the automatic named return value optimization. */ #define current_function_return_value \ (cp_function_chain->x_return_value) /* A type involving 'auto' to be used for return type deduction. */ #define current_function_auto_return_pattern \ (cp_function_chain->x_auto_return_pattern) /* In parser.c. */ extern tree cp_literal_operator_id (const char *); /* TRUE if a tree code represents a statement. */ extern bool statement_code_p[MAX_TREE_CODES]; #define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)] enum languages { lang_c, lang_cplusplus }; /* Macros to make error reporting functions' lives easier. */ #define TYPE_LINKAGE_IDENTIFIER(NODE) \ (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE))) #define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE))) #define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE))) /* Nonzero if NODE has no name for linkage purposes. */ #define TYPE_UNNAMED_P(NODE) \ (OVERLOAD_TYPE_P (NODE) && anon_aggrname_p (TYPE_LINKAGE_IDENTIFIER (NODE))) /* The _DECL for this _TYPE. */ #define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE))) /* Nonzero if T is a type that could resolve to any kind of concrete type at instantiation time. */ #define WILDCARD_TYPE_P(T) \ (TREE_CODE (T) == TEMPLATE_TYPE_PARM \ || TREE_CODE (T) == TYPENAME_TYPE \ || TREE_CODE (T) == TYPEOF_TYPE \ || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \ || TREE_CODE (T) == DECLTYPE_TYPE) /* Nonzero if T is a class (or struct or union) type. Also nonzero for template type parameters, typename types, and instantiated template template parameters. 
Keep these checks in ascending code order. */ #define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T)) /* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or union type. */ #define SET_CLASS_TYPE_P(T, VAL) \ (TYPE_LANG_FLAG_5 (RECORD_OR_UNION_CHECK (T)) = (VAL)) /* Nonzero if T is a class type. Zero for template type parameters, typename types, and so forth. */ #define CLASS_TYPE_P(T) \ (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T)) /* Nonzero if T is a class type but not an union. */ #define NON_UNION_CLASS_TYPE_P(T) \ (TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T)) /* Keep these checks in ascending code order. */ #define RECORD_OR_UNION_CODE_P(T) \ ((T) == RECORD_TYPE || (T) == UNION_TYPE) #define OVERLOAD_TYPE_P(T) \ (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE) /* True if this type is dependent. This predicate is only valid if TYPE_DEPENDENT_P_VALID is true. */ #define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE) /* True if dependent_type_p has been called for this type, with the result that TYPE_DEPENDENT_P is valid. */ #define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE) /* Nonzero if this type is const-qualified. */ #define CP_TYPE_CONST_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0) /* Nonzero if this type is volatile-qualified. */ #define CP_TYPE_VOLATILE_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0) /* Nonzero if this type is restrict-qualified. */ #define CP_TYPE_RESTRICT_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0) /* Nonzero if this type is const-qualified, but not volatile-qualified. Other qualifiers are ignored. This macro is used to test whether or not it is OK to bind an rvalue to a reference. 
*/ #define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \ ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \ == TYPE_QUAL_CONST) #define FUNCTION_ARG_CHAIN(NODE) \ TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE))) /* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES which refers to a user-written parameter. */ #define FUNCTION_FIRST_USER_PARMTYPE(NODE) \ skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE))) /* Similarly, but for DECL_ARGUMENTS. */ #define FUNCTION_FIRST_USER_PARM(NODE) \ skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE)) /* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and ambiguity issues. */ #define DERIVED_FROM_P(PARENT, TYPE) \ (lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE) /* Gives the visibility specification for a class type. */ #define CLASSTYPE_VISIBILITY(TYPE) \ DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE)) #define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \ DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE)) struct GTY (()) tree_pair_s { tree purpose; tree value; }; typedef tree_pair_s *tree_pair_p; /* This structure provides additional information above and beyond what is provide in the ordinary tree_type. In the past, we used it for the types of class types, template parameters types, typename types, and so forth. However, there can be many (tens to hundreds of thousands) of template parameter types in a compilation, and there's no need for this additional information in that case. Therefore, we now use this data structure only for class types. In the past, it was thought that there would be relatively few class types. However, in the presence of heavy use of templates, many (i.e., thousands) of classes can easily be generated. Therefore, we should endeavor to keep the size of this structure to a minimum. 
*/ struct GTY(()) lang_type { unsigned char align; unsigned has_type_conversion : 1; unsigned has_copy_ctor : 1; unsigned has_default_ctor : 1; unsigned const_needs_init : 1; unsigned ref_needs_init : 1; unsigned has_const_copy_assign : 1; unsigned use_template : 2; unsigned has_mutable : 1; unsigned com_interface : 1; unsigned non_pod_class : 1; unsigned nearly_empty_p : 1; unsigned user_align : 1; unsigned has_copy_assign : 1; unsigned has_new : 1; unsigned has_array_new : 1; unsigned gets_delete : 2; unsigned interface_only : 1; unsigned interface_unknown : 1; unsigned contains_empty_class_p : 1; unsigned anon_aggr : 1; unsigned non_zero_init : 1; unsigned empty_p : 1; /* 32 bits allocated. */ unsigned vec_new_uses_cookie : 1; unsigned declared_class : 1; unsigned diamond_shaped : 1; unsigned repeated_base : 1; unsigned being_defined : 1; unsigned debug_requested : 1; unsigned fields_readonly : 1; unsigned ptrmemfunc_flag : 1; unsigned was_anonymous : 1; unsigned lazy_default_ctor : 1; unsigned lazy_copy_ctor : 1; unsigned lazy_copy_assign : 1; unsigned lazy_destructor : 1; unsigned has_const_copy_ctor : 1; unsigned has_complex_copy_ctor : 1; unsigned has_complex_copy_assign : 1; unsigned non_aggregate : 1; unsigned has_complex_dflt : 1; unsigned has_list_ctor : 1; unsigned non_std_layout : 1; unsigned is_literal : 1; unsigned lazy_move_ctor : 1; unsigned lazy_move_assign : 1; unsigned has_complex_move_ctor : 1; unsigned has_complex_move_assign : 1; unsigned has_constexpr_ctor : 1; unsigned unique_obj_representations : 1; unsigned unique_obj_representations_set : 1; /* When adding a flag here, consider whether or not it ought to apply to a template instance if it applies to the template. If so, make sure to copy it in instantiate_class_template! */ /* There are some bits left to fill out a 32-bit word. Keep track of this by updating the size of this bitfield whenever you add or remove a flag. 
*/ unsigned dummy : 4; tree primary_base; vec<tree_pair_s, va_gc> *vcall_indices; tree vtables; tree typeinfo_var; vec<tree, va_gc> *vbases; binding_table nested_udts; tree as_base; vec<tree, va_gc> *pure_virtuals; tree friend_classes; vec<tree, va_gc> * GTY((reorder ("resort_type_member_vec"))) members; tree key_method; tree decl_list; tree befriending_classes; /* In a RECORD_TYPE, information specific to Objective-C++, such as a list of adopted protocols or a pointer to a corresponding @interface. See objc/objc-act.h for details. */ tree objc_info; /* FIXME reuse another field? */ tree lambda_expr; }; /* We used to have a variant type for lang_type. Keep the name of the checking accessor for the sole survivor. */ #define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE)) /* Nonzero for _CLASSTYPE means that operator delete is defined. */ #define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete) #define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1) /* Nonzero if `new NODE[x]' should cause the allocation of extra storage to indicate how many array elements are in use. */ #define TYPE_VEC_NEW_USES_COOKIE(NODE) \ (CLASS_TYPE_P (NODE) \ && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie) /* Nonzero means that this _CLASSTYPE node defines ways of converting itself to other types. */ #define TYPE_HAS_CONVERSION(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_type_conversion) /* Nonzero means that NODE (a class type) has a default constructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor) /* Nonzero means that NODE (a class type) has a copy constructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_COPY_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor) /* Nonzero means that NODE (a class type) has a move constructor -- but that it has not yet been declared. 
*/ #define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor) /* Nonzero means that NODE (a class type) has an assignment operator -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign) /* Nonzero means that NODE (a class type) has an assignment operator -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign) /* Nonzero means that NODE (a class type) has a destructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor) /* Nonzero means that NODE (a class type) is final */ #define CLASSTYPE_FINAL(NODE) \ TYPE_FINAL_P (NODE) /* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */ #define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign) /* True iff the class type NODE has an "operator =" whose parameter has a parameter of type "const X&". */ #define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_assign) /* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */ #define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_ctor) #define TYPE_HAS_CONST_COPY_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor) /* Nonzero if this class has an X(initializer_list<T>) constructor. */ #define TYPE_HAS_LIST_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor) /* Nonzero if this class has a constexpr constructor other than a copy/move constructor. Note that a class can have constexpr constructors for static initialization even if it isn't a literal class. */ #define TYPE_HAS_CONSTEXPR_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor) /* Nonzero if this class defines an overloaded operator new. (An operator new [] doesn't count.) 
*/ #define TYPE_HAS_NEW_OPERATOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_new) /* Nonzero if this class defines an overloaded operator new[]. */ #define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new) /* Nonzero means that this type is being defined. I.e., the left brace starting the definition of this type has been seen. */ #define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined) /* Nonzero means that this type is either complete or being defined, so we can do lookup in it. */ #define COMPLETE_OR_OPEN_TYPE_P(NODE) \ (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE))) /* Mark bits for repeated base checks. */ #define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE)) /* Nonzero if the class NODE has multiple paths to the same (virtual) base object. */ #define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \ (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped) /* Nonzero if the class NODE has multiple instances of the same base type. */ #define CLASSTYPE_REPEATED_BASE_P(NODE) \ (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base) /* The member function with which the vtable will be emitted: the first noninline non-pure-virtual member function. NULL_TREE if there is no key function or if this is a class template */ #define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method) /* Vector of members. During definition, it is unordered and only member functions are present. After completion it is sorted and contains both member functions and non-functions. STAT_HACK is involved to preserve oneslot per name invariant. */ #define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members) /* For class templates, this is a TREE_LIST of all member data, functions, types, and friends in the order of declaration. The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend, and the RECORD_TYPE for the class template otherwise. 
*/ #define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list) /* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These are the constructors that take an in-charge parameter. */ #define CLASSTYPE_CONSTRUCTORS(NODE) \ (get_class_binding_direct (NODE, ctor_identifier)) /* A FUNCTION_DECL for the destructor for NODE. This is the destructors that take an in-charge parameter. If CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL until the destructor is created with lazily_declare_fn. */ #define CLASSTYPE_DESTRUCTOR(NODE) \ (get_class_binding_direct (NODE, dtor_identifier)) /* A dictionary of the nested user-defined-types (class-types, or enums) found within this class. This table includes nested member class templates. */ #define CLASSTYPE_NESTED_UTDS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->nested_udts) /* Nonzero if NODE has a primary base class, i.e., a base class with which it shares the virtual function table pointer. */ #define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \ (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE) /* If non-NULL, this is the binfo for the primary base class, i.e., the base class which contains the virtual function table pointer for this class. */ #define CLASSTYPE_PRIMARY_BINFO(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->primary_base) /* A vector of BINFOs for the direct and indirect virtual base classes that this type uses in a post-order depth-first left-to-right order. (In other words, these bases appear in the order that they should be initialized.) */ #define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases) /* The type corresponding to NODE when NODE is used as a base class, i.e., NODE without virtual base classes or tail padding. */ #define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base) /* True iff NODE is the CLASSTYPE_AS_BASE version of some type. 
*/ #define IS_FAKE_BASE_TYPE(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \ && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE)) /* These are the size and alignment of the type without its virtual base classes, for when we use this type as a base itself. */ #define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE)) /* The alignment of NODE, without its virtual bases, in bytes. */ #define CLASSTYPE_ALIGN_UNIT(NODE) \ (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT) /* A vec<tree> of virtual functions which cannot be inherited by derived classes. When deriving from this type, the derived class must provide its own definition for each of these functions. */ #define CLASSTYPE_PURE_VIRTUALS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals) /* Nonzero means that this type is an abstract class type. */ #define ABSTRACT_CLASS_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE)) /* Nonzero means that this type has an X() constructor. */ #define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor) /* Nonzero means that this type contains a mutable member. */ #define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable) #define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE)) /* Nonzero means that this class type is not POD for the purpose of layout (as defined in the ABI). This is different from the language's POD. */ #define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class) /* Nonzero means that this class type is a non-standard-layout class. 
*/ #define CLASSTYPE_NON_STD_LAYOUT(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout) /* Nonzero means that this class type does have unique object representations. */ #define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations) /* Nonzero means that this class type has CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed. */ #define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set) /* Nonzero means that this class contains pod types whose default initialization is not a zero initialization (namely, pointers to data members). */ #define CLASSTYPE_NON_ZERO_INIT_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init) /* Nonzero if this class is "empty" in the sense of the C++ ABI. */ #define CLASSTYPE_EMPTY_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->empty_p) /* Nonzero if this class is "nearly empty", i.e., contains only a virtual function table pointer. */ #define CLASSTYPE_NEARLY_EMPTY_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p) /* Nonzero if this class contains an empty subobject. */ #define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p) /* A list of class types of which this type is a friend. The TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the case of a template friend. */ #define CLASSTYPE_FRIEND_CLASSES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes) /* A list of the classes which grant friendship to this class. */ #define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes) /* The associated LAMBDA_EXPR that made this class. */ #define CLASSTYPE_LAMBDA_EXPR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr) /* The extra mangling scope for this closure type. */ #define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \ (LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE))) /* Say whether this node was declared as a "class" or a "struct". 
*/ #define CLASSTYPE_DECLARED_CLASS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->declared_class) /* Nonzero if this class has const members which have no specified initialization. */ #define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \ (TYPE_LANG_SPECIFIC (NODE) \ ? LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init : 0) #define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \ (LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init = (VALUE)) /* Nonzero if this class has ref members which have no specified initialization. */ #define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \ (TYPE_LANG_SPECIFIC (NODE) \ ? LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init : 0) #define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \ (LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init = (VALUE)) /* Nonzero if this class is included from a header file which employs `#pragma interface', and it is not included in its implementation file. */ #define CLASSTYPE_INTERFACE_ONLY(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_only) /* True if we have already determined whether or not vtables, VTTs, typeinfo, and other similar per-class data should be emitted in this translation unit. This flag does not indicate whether or not these items should be emitted; it only indicates that we know one way or the other. */ #define CLASSTYPE_INTERFACE_KNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0) /* The opposite of CLASSTYPE_INTERFACE_KNOWN. */ #define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown) #define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X)) #define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1) #define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0) /* Nonzero if a _DECL node requires us to output debug info for this class. 
*/ #define CLASSTYPE_DEBUG_REQUESTED(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested) /* Additional macros for inheritance information. */ /* Nonzero means that this class is on a path leading to a new vtable. */ #define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE) /* Nonzero means B (a BINFO) has its own vtable. Any copies will not have this flag set. */ #define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B)) /* Compare a BINFO_TYPE with another type for equality. For a binfo, this is functionally equivalent to using same_type_p, but measurably faster. At least one of the arguments must be a BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If BINFO_TYPE(T) ever stops being the main variant of the class the binfo is for, this macro must change. */ #define SAME_BINFO_TYPE_P(A, B) ((A) == (B)) /* Any subobject that needs a new vtable must have a vptr and must not be a non-virtual primary base (since it would then use the vtable from a derived class and never become non-primary.) */ #define SET_BINFO_NEW_VTABLE_MARKED(B) \ (BINFO_NEW_VTABLE_MARKED (B) = 1, \ gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \ gcc_assert (TYPE_VFIELD (BINFO_TYPE (B)))) /* Nonzero if this binfo is for a dependent base - one that should not be searched. */ #define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE) /* Nonzero if this binfo has lost its primary base binfo (because that is a nearly-empty virtual base that has been taken by some other base in the complete hierarchy. */ #define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE) /* Nonzero if this BINFO is a primary base class. */ #define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE) /* A vec<tree_pair_s> of the vcall indices associated with the class NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual function. The VALUE is the index into the virtual table where the vcall offset for that function is stored, when NODE is a virtual base. 
*/ #define CLASSTYPE_VCALL_INDICES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices) /* The various vtables for the class NODE. The primary vtable will be first, followed by the construction vtables and VTT, if any. */ #define CLASSTYPE_VTABLES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vtables) /* The std::type_info variable representing this class, or NULL if no such variable has been created. This field is only set for the TYPE_MAIN_VARIANT of the class. */ #define CLASSTYPE_TYPEINFO_VAR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var) /* Accessor macros for the BINFO_VIRTUALS list. */ /* The number of bytes by which to adjust the `this' pointer when calling this virtual function. Subtract this value from the this pointer. Always non-NULL, might be constant zero though. */ #define BV_DELTA(NODE) (TREE_PURPOSE (NODE)) /* If non-NULL, the vtable index at which to find the vcall offset when calling this virtual function. Add the value at that vtable index to the this pointer. */ #define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE)) /* The function to call. */ #define BV_FN(NODE) (TREE_VALUE (NODE)) /* Whether or not this entry is for a lost primary virtual base. */ #define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE will be NULL_TREE to indicate a throw specification of `()', or no exceptions allowed. For a noexcept specification, TREE_VALUE is NULL_TREE and TREE_PURPOSE is the constant-expression. For a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT (for templates) or an OVERLOAD list of functions (for implicitly declared functions). */ #define TYPE_RAISES_EXCEPTIONS(NODE) \ TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()' or noexcept(true). 
*/
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))

/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept.  This is the
   case for things declared noexcept(true) and, with -fnothrow-opt, for
   throw() functions.  */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)

/* The binding level associated with the namespace.  */
#define NAMESPACE_LEVEL(NODE) \
  (LANG_DECL_NS_CHECK (NODE)->level)

/* Discriminator values for lang_decl: selects which member of the
   lang_decl_u union below is active.  */

enum lang_decl_selector
{
  lds_min,
  lds_fn,
  lds_ns,
  lds_parm,
  lds_decomp
};

/* Flags shared by all forms of DECL_LANG_SPECIFIC.  Some of the flags live
   here only to make lang_decl_min/fn smaller.  Do not make this struct
   larger than 32 bits; instead, make sel smaller.  */

struct GTY(()) lang_decl_base
{
  /* Discriminator for the union in lang_decl.  Larger than necessary
     for faster access.  */
  ENUM_BITFIELD(lang_decl_selector) selector : 16;
  ENUM_BITFIELD(languages) language : 1;
  unsigned use_template : 2;
  unsigned not_really_extern : 1;		/* var or fn */
  unsigned initialized_in_class : 1;		/* var or fn */
  unsigned repo_available_p : 1;		/* var or fn */
  unsigned threadprivate_or_deleted_p : 1;	/* var or fn */
  unsigned anticipated_p : 1;			/* fn, type or template */
  /* anticipated_p reused as DECL_OMP_PRIVATIZED_MEMBER in var */
  unsigned friend_or_tls : 1;			/* var, fn, type or template */
  unsigned unknown_bound_p : 1;			/* var */
  unsigned odr_used : 1;			/* var or fn */
  /* Selects between the members of lang_decl_u2; checked by
     LANG_DECL_U2_CHECK.  */
  unsigned u2sel : 1;
  unsigned concept_p : 1;			/* applies to vars and functions */
  unsigned var_declared_inline_p : 1;		/* var */
  unsigned dependent_init_p : 1;		/* var */
  /* 1 spare bit */
};

/* True for DECL codes which have template info and access.  */
#define LANG_DECL_HAS_MIN(NODE) \
  (VAR_OR_FUNCTION_DECL_P (NODE) \
   || TREE_CODE (NODE) == FIELD_DECL \
   || TREE_CODE (NODE) == CONST_DECL \
   || TREE_CODE (NODE) == TYPE_DECL \
   || TREE_CODE (NODE) == TEMPLATE_DECL \
   || TREE_CODE (NODE) == USING_DECL)

/* DECL_LANG_SPECIFIC for the above codes.
*/ struct GTY(()) lang_decl_min { struct lang_decl_base base; /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is THUNK_ALIAS. In a FUNCTION_DECL for which DECL_THUNK_P does not hold, VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is DECL_TEMPLATE_INFO. */ tree template_info; union lang_decl_u2 { /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is THUNK_VIRTUAL_OFFSET. In a VAR_DECL for which DECL_HAS_VALUE_EXPR_P holds, this is DECL_CAPTURED_VARIABLE. Otherwise this is DECL_ACCESS. */ tree GTY ((tag ("0"))) access; /* For TREE_STATIC VAR_DECL in function, this is DECL_DISCRIMINATOR. */ int GTY ((tag ("1"))) discriminator; } GTY ((desc ("%0.u.base.u2sel"))) u2; }; /* Additional DECL_LANG_SPECIFIC information for functions. */ struct GTY(()) lang_decl_fn { struct lang_decl_min min; /* In a overloaded operator, this is the compressed operator code. */ unsigned ovl_op_code : 6; unsigned global_ctor_p : 1; unsigned global_dtor_p : 1; unsigned static_function : 1; unsigned pure_virtual : 1; unsigned defaulted_p : 1; unsigned has_in_charge_parm_p : 1; unsigned has_vtt_parm_p : 1; unsigned pending_inline_p : 1; unsigned nonconverting : 1; unsigned thunk_p : 1; unsigned this_thunk_p : 1; unsigned hidden_friend_p : 1; unsigned omp_declare_reduction_p : 1; unsigned spare : 13; /* 32-bits padding on 64-bit host. */ /* For a non-thunk function decl, this is a tree list of friendly classes. For a thunk function decl, it is the thunked to function decl. */ tree befriending_classes; /* For a non-virtual FUNCTION_DECL, this is DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both this pointer and result pointer adjusting thunks are chained here. This pointer thunks to return pointer thunks will be chained on the return pointer thunk. */ tree context; union lang_decl_u5 { /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is DECL_CLONED_FUNCTION. 
*/ tree GTY ((tag ("0"))) cloned_function; /* In a FUNCTION_DECL for which THUNK_P holds this is the THUNK_FIXED_OFFSET. */ HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset; } GTY ((desc ("%1.thunk_p"))) u5; union lang_decl_u3 { struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info; struct language_function * GTY ((tag ("0"))) saved_language_function; } GTY ((desc ("%1.pending_inline_p"))) u; }; /* DECL_LANG_SPECIFIC for namespaces. */ struct GTY(()) lang_decl_ns { struct lang_decl_base base; cp_binding_level *level; /* using directives and inline children. These need to be va_gc, because of PCH. */ vec<tree, va_gc> *usings; vec<tree, va_gc> *inlinees; /* Hash table of bound decls. It'd be nice to have this inline, but as the hash_map has a dtor, we can't then put this struct into a union (until moving to c++11). */ hash_table<named_decl_hash> *bindings; }; /* DECL_LANG_SPECIFIC for parameters. */ struct GTY(()) lang_decl_parm { struct lang_decl_base base; int level; int index; }; /* Additional DECL_LANG_SPECIFIC information for structured bindings. */ struct GTY(()) lang_decl_decomp { struct lang_decl_min min; /* The artificial underlying "e" variable of the structured binding variable. */ tree base; }; /* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a union rather than a struct containing a union as its only field, but tree.h declares it as a struct. */ struct GTY(()) lang_decl { union GTY((desc ("%h.base.selector"))) lang_decl_u { /* Nothing of only the base type exists. */ struct lang_decl_base GTY ((default)) base; struct lang_decl_min GTY((tag ("lds_min"))) min; struct lang_decl_fn GTY ((tag ("lds_fn"))) fn; struct lang_decl_ns GTY((tag ("lds_ns"))) ns; struct lang_decl_parm GTY((tag ("lds_parm"))) parm; struct lang_decl_decomp GTY((tag ("lds_decomp"))) decomp; } u; }; /* Looks through a template (if present) to find what it declares. */ #define STRIP_TEMPLATE(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL ? 
DECL_TEMPLATE_RESULT (NODE) : NODE) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define LANG_DECL_MIN_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!LANG_DECL_HAS_MIN (NODE)) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.min; }) /* We want to be able to check DECL_CONSTRUCTOR_P and such on a function template, not just on a FUNCTION_DECL. So when looking for things in lang_decl_fn, look down through a TEMPLATE_DECL into its result. */ #define LANG_DECL_FN_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \ if (!DECL_DECLARES_FUNCTION_P (NODE) \ || lt->u.base.selector != lds_fn) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.fn; }) #define LANG_DECL_NS_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (TREE_CODE (NODE) != NAMESPACE_DECL \ || lt->u.base.selector != lds_ns) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.ns; }) #define LANG_DECL_PARM_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (TREE_CODE (NODE) != PARM_DECL \ || lt->u.base.selector != lds_parm) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.parm; }) #define LANG_DECL_DECOMP_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!VAR_P (NODE) \ || lt->u.base.selector != lds_decomp) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.decomp; }) #define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.min.u2; }) #else #define LANG_DECL_MIN_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.min) #define LANG_DECL_FN_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn) #define LANG_DECL_NS_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.ns) 
#define LANG_DECL_PARM_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.parm) #define LANG_DECL_DECOMP_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.decomp) #define LANG_DECL_U2_CHECK(NODE, TF) \ (&DECL_LANG_SPECIFIC (NODE)->u.min.u2) #endif /* ENABLE_TREE_CHECKING */ /* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the declaration. Some entities (like a member function in a local class, or a local variable) do not have linkage at all, and this macro should not be used in those cases. Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was created by language-independent code, and has C linkage. Most VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */ #define DECL_LANGUAGE(NODE) \ (DECL_LANG_SPECIFIC (NODE) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.language \ : (TREE_CODE (NODE) == FUNCTION_DECL \ ? lang_c : lang_cplusplus)) /* Set the language linkage for NODE to LANGUAGE. */ #define SET_DECL_LANGUAGE(NODE, LANGUAGE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE)) /* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function is a constructor. */ #define DECL_CONSTRUCTOR_P(NODE) \ IDENTIFIER_CTOR_P (DECL_NAME (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete object. */ #define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == complete_ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base object. */ #define DECL_BASE_CONSTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == base_ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the specialized in-charge constructor or the specialized not-in-charge constructor. */ #define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \ (DECL_NAME (NODE) == ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. 
*/
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)

/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor.  */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))

/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
   is a destructor.  */
#define DECL_DESTRUCTOR_P(NODE) \
  IDENTIFIER_DTOR_P (DECL_NAME (NODE))

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
   specialized in-charge destructor, in-charge deleting destructor,
   or the base destructor.  */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
  (DECL_NAME (NODE) == dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
   object.  */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
  (DECL_NAME (NODE) == complete_dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
   object.  */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
  (DECL_NAME (NODE) == base_dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
   object that deletes the object after it has been destroyed.  */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
  (DECL_NAME (NODE) == deleting_dtor_identifier)

/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
   destructor.  */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))

/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
   cloned.  */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))

/* Perform an action for each clone of FN, if FN is a function with
   clones.  This macro should be used like:

      FOR_EACH_CLONE (clone, fn)
	{ ... }

  */
#define FOR_EACH_CLONE(CLONE, FN)			\
  if (!(TREE_CODE (FN) == FUNCTION_DECL			\
	&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN)	\
	    || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))))\
    ;							\
  else							\
    for (CLONE = DECL_CHAIN (FN);			\
	 CLONE && DECL_CLONED_FUNCTION_P (CLONE);	\
	 CLONE = DECL_CHAIN (CLONE))

/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS.
*/ #define DECL_DISCRIMINATOR_P(NODE) \ (VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE)) /* Discriminator for name mangling. */ #define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator) /* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */ #define DECL_DISCRIMINATOR_SET_P(NODE) \ (DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1) /* The index of a user-declared parameter in its function, starting at 1. All artificial parameters will have index 0. */ #define DECL_PARM_INDEX(NODE) \ (LANG_DECL_PARM_CHECK (NODE)->index) /* The level of a user-declared parameter in its function, starting at 1. A parameter of the function will have level 1; a parameter of the first nested function declarator (i.e. t in void f (void (*p)(T t))) will have level 2. */ #define DECL_PARM_LEVEL(NODE) \ (LANG_DECL_PARM_CHECK (NODE)->level) /* Nonzero if the VTT parm has been added to NODE. */ #define DECL_HAS_VTT_PARM_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p) /* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is required. */ #define DECL_NEEDS_VTT_PARM_P(NODE) \ (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \ && (DECL_BASE_CONSTRUCTOR_P (NODE) \ || DECL_BASE_DESTRUCTOR_P (NODE))) /* Nonzero if NODE is a user-defined conversion operator. */ #define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE)) /* The type to which conversion operator FN converts to. */ #define DECL_CONV_FN_TYPE(FN) \ TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN))) /* Nonzero if NODE, a static data member, was declared in its class as an array of unknown bound. */ #define VAR_HAD_UNKNOWN_BOUND(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.unknown_bound_p \ : false) #define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.unknown_bound_p = true) /* True iff decl NODE is for an overloaded operator. 
*/ #define DECL_OVERLOADED_OPERATOR_P(NODE) \ IDENTIFIER_ANY_OP_P (DECL_NAME (NODE)) /* Nonzero if NODE is an assignment operator (including += and such). */ #define DECL_ASSIGNMENT_OPERATOR_P(NODE) \ IDENTIFIER_ASSIGN_OP_P (DECL_NAME (NODE)) /* NODE is a function_decl for an overloaded operator. Return its compressed (raw) operator code. Note that this is not a TREE_CODE. */ #define DECL_OVERLOADED_OPERATOR_CODE_RAW(NODE) \ (LANG_DECL_FN_CHECK (NODE)->ovl_op_code) /* DECL is an overloaded operator. Test whether it is for TREE_CODE (a literal constant). */ #define DECL_OVERLOADED_OPERATOR_IS(DECL, CODE) \ (DECL_OVERLOADED_OPERATOR_CODE_RAW (DECL) == OVL_OP_##CODE) /* For FUNCTION_DECLs: nonzero means that this function is a constructor or a destructor with an extra in-charge parameter to control whether or not virtual bases are constructed. */ #define DECL_HAS_IN_CHARGE_PARM_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p) /* Nonzero if DECL is a declaration of __builtin_constant_p. */ #define DECL_IS_BUILTIN_CONSTANT_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \ && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P) /* Nonzero for _DECL means that this decl appears in (or will appear in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for detecting circularity in case members are multiply defined. In the case of a VAR_DECL, it is also used to determine how program storage should be allocated. */ #define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE)) /* Nonzero for a VAR_DECL means that the variable's initialization (if any) has been processed. (In general, DECL_INITIALIZED_P is !DECL_EXTERNAL, but static data members may be initialized even if not defined.) */ #define DECL_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL iff an explicit initializer was provided or a non-trivial constructor is called. 
*/ #define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL that was initialized with a constant-expression. */ #define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \ (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE))) /* Nonzero if the DECL was initialized in the class definition itself, rather than outside the class. This is used for both static member VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */ #define DECL_INITIALIZED_IN_CLASS_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ ->u.base.initialized_in_class) /* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr]. Only available for decls with DECL_LANG_SPECIFIC. */ #define DECL_ODR_USED(DECL) \ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ ->u.base.odr_used) /* Nonzero for DECL means that this decl is just a friend declaration, and should not be added to the list of members for this class. */ #define DECL_FRIEND_P(NODE) \ (DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \ ->u.base.friend_or_tls) /* Nonzero if the thread-local variable was declared with __thread as opposed to thread_local. */ #define DECL_GNU_TLS_P(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ && DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls) #define SET_DECL_GNU_TLS_P(NODE) \ (retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \ DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true) /* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */ #define DECL_BEFRIENDING_CLASSES(NODE) \ (LANG_DECL_FN_CHECK (NODE)->befriending_classes) /* Nonzero for FUNCTION_DECL means that this decl is a static member function. */ #define DECL_STATIC_FUNCTION_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->static_function) /* Nonzero for FUNCTION_DECL means that this decl is a non-static member function. 
*/ #define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \ (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE) /* Nonzero for FUNCTION_DECL means that this decl is a member function (static or non-static). */ #define DECL_FUNCTION_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE)) /* Nonzero for FUNCTION_DECL means that this member function has `this' as const X *const. */ #define DECL_CONST_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for FUNCTION_DECL means that this member function has `this' as volatile X *const. */ #define DECL_VOLATILE_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for a DECL means that this member is a non-static member. */ #define DECL_NONSTATIC_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ || TREE_CODE (NODE) == FIELD_DECL) /* Nonzero for _DECL means that this member object type is mutable. */ #define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE)) /* Nonzero for _DECL means that this constructor or conversion function is non-converting. */ #define DECL_NONCONVERTING_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->nonconverting) /* Nonzero for FUNCTION_DECL means that this member function is a pure virtual function. */ #define DECL_PURE_VIRTUAL_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->pure_virtual) /* True (in a FUNCTION_DECL) if NODE is a virtual function that is an invalid overrider for a function from a base class. Once we have complained about an invalid overrider we avoid complaining about it again. */ #define DECL_INVALID_OVERRIDER_P(NODE) \ (DECL_LANG_FLAG_4 (NODE)) /* True (in a FUNCTION_DECL) if NODE is a function declared with an override virt-specifier */ #define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE)) /* The thunks associated with NODE, a FUNCTION_DECL. 
*/ #define DECL_THUNKS(NODE) \ (DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE) /* Set DECL_THUNKS. */ #define SET_DECL_THUNKS(NODE,THUNKS) \ (LANG_DECL_FN_CHECK (NODE)->context = (THUNKS)) /* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this is the constructor it inherits from. */ #define DECL_INHERITED_CTOR(NODE) \ (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \ ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE) /* And this is the base that constructor comes from. */ #define DECL_INHERITED_CTOR_BASE(NODE) \ (DECL_INHERITED_CTOR (NODE) \ ? DECL_CONTEXT (flag_new_inheriting_ctors \ ? strip_inheriting_ctors (NODE) \ : DECL_INHERITED_CTOR (NODE)) \ : NULL_TREE) /* Set the inherited base. */ #define SET_DECL_INHERITED_CTOR(NODE,INH) \ (LANG_DECL_FN_CHECK (NODE)->context = (INH)) /* Nonzero if NODE is a thunk, rather than an ordinary function. */ #define DECL_THUNK_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_LANG_SPECIFIC (NODE) \ && LANG_DECL_FN_CHECK (NODE)->thunk_p) /* Set DECL_THUNK_P for node. */ #define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \ (LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \ LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING)) /* Nonzero if NODE is a this pointer adjusting thunk. */ #define DECL_THIS_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p) /* Nonzero if NODE is a result pointer adjusting thunk. */ #define DECL_RESULT_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p) /* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */ #define DECL_NON_THUNK_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE)) /* Nonzero if NODE is `extern "C"'. */ #define DECL_EXTERN_C_P(NODE) \ (DECL_LANGUAGE (NODE) == lang_c) /* Nonzero if NODE is an `extern "C"' function. 
*/ #define DECL_EXTERN_C_FUNCTION_P(NODE) \ (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE)) /* True iff DECL is an entity with vague linkage whose definition is available in this translation unit. */ #define DECL_REPO_AVAILABLE_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p) /* True if DECL is declared 'constexpr'. */ #define DECL_DECLARED_CONSTEXPR_P(DECL) \ DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL))) // True if NODE was declared as 'concept'. The flag implies that the // declaration is constexpr, that the declaration cannot be specialized or // refined, and that the result type must be convertible to bool. #define DECL_DECLARED_CONCEPT_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.concept_p) /* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a template function. */ #define DECL_PRETTY_FUNCTION_P(NODE) \ (DECL_NAME (NODE) \ && id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__")) /* Nonzero if the variable was declared to be thread-local. We need a special C++ version of this test because the middle-end DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for templates. */ #define CP_DECL_THREAD_LOCAL_P(NODE) \ (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))) /* The _TYPE context in which this _DECL appears. This field holds the class where a virtual function instance is actually defined. */ #define DECL_CLASS_CONTEXT(NODE) \ (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE) /* For a non-member friend function, the class (if any) in which this friend was defined. For example, given: struct S { friend void f () { ... } }; the DECL_FRIEND_CONTEXT for `f' will be `S'. */ #define DECL_FRIEND_CONTEXT(NODE) \ ((DECL_DECLARES_FUNCTION_P (NODE) \ && DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \ ? LANG_DECL_FN_CHECK (NODE)->context \ : NULL_TREE) /* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. 
*/ #define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \ (LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT)) #define CP_DECL_CONTEXT(NODE) \ (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace) #define CP_TYPE_CONTEXT(NODE) \ (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace) #define FROB_CONTEXT(NODE) \ ((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE)) /* 1 iff NODE has namespace scope, including the global namespace. */ #define DECL_NAMESPACE_SCOPE_P(NODE) \ (!DECL_TEMPLATE_PARM_P (NODE) \ && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL) #define TYPE_NAMESPACE_SCOPE_P(NODE) \ (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL) #define NAMESPACE_SCOPE_P(NODE) \ ((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \ || (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE))) /* 1 iff NODE is a class member. */ #define DECL_CLASS_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE))) #define TYPE_CLASS_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE))) /* 1 iff NODE is function-local. */ #define DECL_FUNCTION_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) \ && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL) #define TYPE_FUNCTION_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL) /* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for both the primary typeinfo object and the associated NTBS name. */ #define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)) /* 1 iff VAR_DECL node NODE is virtual table or VTT. */ #define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE)) /* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */ #define FUNCTION_REF_QUALIFIED(NODE) \ TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE)) /* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. 
*/
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
  TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))

/* Returns 1 iff VAR_DECL is a construction virtual table.
   DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
   before using this macro.  */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
  TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))

/* 1 iff NODE is function-local, but for types.  */
#define LOCAL_CLASS_P(NODE)				\
  (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)

/* The nesting depth of namespace, class or function.  Makes is_ancestor much
   simpler.  Only 8 bits available.  */
#define SCOPE_DEPTH(NODE) \
  (NAMESPACE_DECL_CHECK (NODE)->base.u.bits.address_space)

/* Whether the namespace is an inline namespace.  */
#define DECL_NAMESPACE_INLINE_P(NODE) \
  TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE))

/* In a NAMESPACE_DECL, a vector of using directives.  */
#define DECL_NAMESPACE_USING(NODE) \
   (LANG_DECL_NS_CHECK (NODE)->usings)

/* In a NAMESPACE_DECL, a vector of inline namespaces.  */
#define DECL_NAMESPACE_INLINEES(NODE) \
   (LANG_DECL_NS_CHECK (NODE)->inlinees)

/* Pointer to hash_map from IDENTIFIERS to DECLS  */
#define DECL_NAMESPACE_BINDINGS(NODE) \
   (LANG_DECL_NS_CHECK (NODE)->bindings)

/* In a NAMESPACE_DECL, points to the original namespace if this is
   a namespace alias.  */
#define DECL_NAMESPACE_ALIAS(NODE) \
	DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE)  \
  (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))

/* Nonzero if NODE is the std namespace.  */
#define DECL_NAMESPACE_STD_P(NODE)			\
  (TREE_CODE (NODE) == NAMESPACE_DECL			\
   && CP_DECL_CONTEXT (NODE) == global_namespace	\
   && DECL_NAME (NODE) == std_identifier)

/* In a TREE_LIST in an attribute list, indicates that the attribute
   must be applied at instantiation time.
*/ #define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) /* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag was inherited from a template parameter, not explicitly indicated. */ #define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) extern tree decl_shadowed_for_var_lookup (tree); extern void decl_shadowed_for_var_insert (tree, tree); /* Non zero if this is a using decl for a dependent scope. */ #define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE)) /* The scope named in a using decl. */ #define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE)) /* The decls named by a using decl. */ #define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE)) /* Non zero if the using decl refers to a dependent type. */ #define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE)) /* In a VAR_DECL, true if we have a shadowed local variable in the shadowed var table for this VAR_DECL. */ #define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \ (VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p) /* In a VAR_DECL for a variable declared in a for statement, this is the shadowed (local) variable. */ #define DECL_SHADOWED_FOR_VAR(NODE) \ (DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL) #define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \ (decl_shadowed_for_var_insert (NODE, VAL)) /* In a FUNCTION_DECL, this is nonzero if this function was defined in the class definition. We have saved away the text of the function, but have not yet processed it. */ #define DECL_PENDING_INLINE_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->pending_inline_p) /* If DECL_PENDING_INLINE_P holds, this is the saved text of the function. */ #define DECL_PENDING_INLINE_INFO(NODE) \ (LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info) /* Nonzero for TYPE_DECL means that it was written 'using name = type'. 
*/ #define TYPE_DECL_ALIAS_P(NODE) \ DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE)) /* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */ #define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \ DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE)) /* Nonzero for a type which is an alias for another type; i.e, a type which declaration was written 'using name-of-type = another-type'. */ #define TYPE_ALIAS_P(NODE) \ (TYPE_P (NODE) \ && TYPE_NAME (NODE) \ && TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \ && TYPE_DECL_ALIAS_P (TYPE_NAME (NODE))) /* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or TEMPLATE_DECL, the entity is either a template specialization (if DECL_USE_TEMPLATE is nonzero) or the abstract instance of the template itself. In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a specialization or abstract instance. The TREE_VALUE is the template arguments used to specialize the template. Consider: template <typename T> struct S { friend void f(T) {} }; In this case, S<int>::f is, from the point of view of the compiler, an instantiation of a template -- but, from the point of view of the language, each instantiation of S results in a wholly unrelated global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f will be non-NULL, but DECL_USE_TEMPLATE will be zero. */ #define DECL_TEMPLATE_INFO(NODE) \ (DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \ ->u.min.template_info) /* For a lambda capture proxy, its captured variable. */ #define DECL_CAPTURED_VARIABLE(NODE) \ (LANG_DECL_U2_CHECK (NODE, 0)->access) /* For a VAR_DECL, indicates that the variable is actually a non-static data member of anonymous union that has been promoted to variable status. */ #define DECL_ANON_UNION_VAR_P(NODE) \ (DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))) /* Template information for a RECORD_TYPE or UNION_TYPE. 
*/ #define CLASSTYPE_TEMPLATE_INFO(NODE) \ (TYPE_LANG_SLOT_1 (RECORD_OR_UNION_CHECK (NODE))) /* Template information for a template template parameter. */ #define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \ (TYPE_LANG_SLOT_1 (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE))) /* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or BOUND_TEMPLATE_TEMPLATE_PARM type. This ignores any alias templateness of NODE. It'd be nice if this could unconditionally access the slot, rather than return NULL if given a non-templatable type. */ #define TYPE_TEMPLATE_INFO(NODE) \ (TREE_CODE (NODE) == ENUMERAL_TYPE \ || TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \ || RECORD_OR_UNION_TYPE_P (NODE) \ ? TYPE_LANG_SLOT_1 (NODE) : NULL_TREE) /* Template information (if any) for an alias type. */ #define TYPE_ALIAS_TEMPLATE_INFO(NODE) \ (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \ ? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \ : NULL_TREE) /* If NODE is a type alias, this accessor returns the template info for the alias template (if any). Otherwise behave as TYPE_TEMPLATE_INFO. */ #define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \ (TYPE_ALIAS_P (NODE) \ ? TYPE_ALIAS_TEMPLATE_INFO (NODE) \ : TYPE_TEMPLATE_INFO (NODE)) /* Set the template information for an ENUMERAL_, RECORD_, or UNION_TYPE to VAL. */ #define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \ (TREE_CODE (NODE) == ENUMERAL_TYPE \ || (CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \ ? (TYPE_LANG_SLOT_1 (NODE) = (VAL)) \ : (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))) #define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE)) #define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE)) #define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE) /* For a given TREE_VEC containing a template argument list, this property contains the number of arguments that are not defaulted. 
*/ #define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE)) /* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT property. */ #define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \ NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE) #if CHECKING_P #define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) #else #define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \ ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \ : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE)) #endif /* The list of typedefs - used in the template - that need access checking at template instantiation time. FIXME this should be associated with the TEMPLATE_DECL, not the TEMPLATE_INFO. */ #define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \ ((struct tree_template_info*)TEMPLATE_INFO_CHECK \ (NODE))->typedefs_needing_access_checking /* We use TREE_VECs to hold template arguments. If there is only one level of template arguments, then the TREE_VEC contains the arguments directly. If there is more than one level of template arguments, then each entry in the TREE_VEC is itself a TREE_VEC, containing the template arguments for a single level. The first entry in the outer TREE_VEC is the outermost level of template parameters; the last is the innermost. It is incorrect to ever form a template argument vector containing only one level of arguments, but which is a TREE_VEC containing as its only entry the TREE_VEC for that level. For each TREE_VEC containing the template arguments for a single level, it's possible to get or set the number of non defaulted template arguments by using the accessor macros GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */ /* Nonzero if the template arguments is actually a vector of vectors, rather than just a vector. 
*/ #define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \ (NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \ && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC) /* The depth of a template argument vector. When called directly by the parser, we use a TREE_LIST rather than a TREE_VEC to represent template arguments. In fact, we may even see NULL_TREE if there are no template arguments. In both of those cases, there is only one level of template arguments. */ #define TMPL_ARGS_DEPTH(NODE) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1) /* The LEVELth level of the template ARGS. The outermost level of args is level 1, not level 0. */ #define TMPL_ARGS_LEVEL(ARGS, LEVEL) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \ ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS)) /* Set the LEVELth level of the template ARGS to VAL. This macro does not work with single-level argument vectors. */ #define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \ (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL)) /* Accesses the IDXth parameter in the LEVELth level of the ARGS. */ #define TMPL_ARG(ARGS, LEVEL, IDX) \ (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX)) /* Given a single level of template arguments in NODE, return the number of arguments. */ #define NUM_TMPL_ARGS(NODE) \ (TREE_VEC_LENGTH (NODE)) /* Returns the innermost level of template arguments in ARGS. */ #define INNERMOST_TEMPLATE_ARGS(NODE) \ (get_innermost_template_args ((NODE), 1)) /* The number of levels of template parameters given by NODE. */ #define TMPL_PARMS_DEPTH(NODE) \ ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE))) /* The TEMPLATE_DECL instantiated or specialized by NODE. This TEMPLATE_DECL will be the immediate parent, not the most general template. For example, in: template <class T> struct S { template <class U> void f(U); } the FUNCTION_DECL for S<int>::f<double> will have, as its DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'. 
   As a special case, for a member friend template of a template
   class, this value will not be a TEMPLATE_DECL, but rather an
   IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
   any explicit template arguments provided.  For example, in:

     template <class T> struct S { friend void f<int>(int, double); }

   the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
   DECL_TI_ARGS will be {int}.

   For a FIELD_DECL with a non-static data member initializer, this value
   is the FIELD_DECL it was instantiated from.  */
#define DECL_TI_TEMPLATE(NODE)      TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))

/* The template arguments used to obtain this decl from the most
   general form of DECL_TI_TEMPLATE.  For the example given for
   DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}.  These
   are always the full set of arguments required to instantiate this
   declaration from the most general template specialized here.  */
#define DECL_TI_ARGS(NODE)	    TI_ARGS (DECL_TEMPLATE_INFO (NODE))

/* The TEMPLATE_DECL associated with NODE, a class type.  Even if NODE
   will be generated from a partial specialization, the TEMPLATE_DECL
   referred to here will be the original template.  For example, given:

      template <typename T> struct S {};
      template <typename T> struct S<T*> {};

   the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>.  */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE)     TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))

/* For a template instantiation TYPE, returns the TYPE corresponding
   to the primary template.  Otherwise returns TYPE itself.  */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE)				\
  ((CLASSTYPE_USE_TEMPLATE ((TYPE))					\
    && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE)))			\
   ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE		\
				      (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
   : (TYPE))

/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs.
*/ #define TYPE_TI_TEMPLATE(NODE) \ (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE))) /* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */ #define TYPE_TI_ARGS(NODE) \ (TI_ARGS (TYPE_TEMPLATE_INFO (NODE))) #define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE) /* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the sense of [temp.mem]. */ #define DECL_MEMBER_TEMPLATE_P(NODE) \ (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE))) /* Nonzero if the NODE corresponds to the template parameters for a member template, whose inline definition is being processed after the class definition is complete. */ #define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE) /* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */ #define DECL_PACK_P(NODE) \ (DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE))) /* Determines if NODE is an expansion of one or more parameter packs, e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_P(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ || TREE_CODE (NODE) == EXPR_PACK_EXPANSION) /* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_PATTERN(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION ? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* The list of parameter packs used in the PACK_EXPANSION_* node. The TREE_VALUE of each TREE_LIST contains the parameter packs. */ #define PACK_EXPANSION_PARAMETER_PACKS(NODE) \ *(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \ ? 
&TREE_OPERAND (NODE, 1) \ : &TYPE_MIN_VALUE_RAW (TYPE_PACK_EXPANSION_CHECK (NODE))) /* Any additional template args to be applied when substituting into the pattern, set by tsubst_pack_expansion for partial instantiations. If this is a TREE_LIST, the TREE_VALUE of the first element is the usual template argument TREE_VEC, and the TREE_PURPOSE of later elements are enclosing functions that provided function parameter packs we'll need to map appropriately. */ #define PACK_EXPANSION_EXTRA_ARGS(NODE) \ *(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ ? &TYPE_MAX_VALUE_RAW (NODE) \ : &TREE_OPERAND ((NODE), 2)) /* True iff this pack expansion is within a function context. */ #define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE) /* True iff this pack expansion is for sizeof.... */ #define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE) /* True iff the wildcard can match a template parameter pack. */ #define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE) /* Determine if this is an argument pack. */ #define ARGUMENT_PACK_P(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \ || TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK) /* The arguments stored in an argument pack. Arguments are stored in a TREE_VEC, which may have length zero. */ #define ARGUMENT_PACK_ARGS(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Set the arguments stored in an argument pack. VALUE must be a TREE_VEC. */ #define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* Whether the argument pack is "incomplete", meaning that more arguments can still be deduced. Incomplete argument packs are only used when the user has provided an explicit template argument list for a variadic function template. Some of the explicit template arguments will be placed into the beginning of the argument pack, but additional arguments might still be deduced. 
*/
#define ARGUMENT_PACK_INCOMPLETE_P(NODE)	\
  TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))

/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
   arguments used to fill this pack.  */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE)	\
  TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))

/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
   argument will be selected.  */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE)				\
  (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)

/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
   select.  */
#define ARGUMENT_PACK_SELECT_INDEX(NODE)				\
  (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)

#define FOLD_EXPR_CHECK(NODE)						\
  TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR,	\
	       BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)

#define BINARY_FOLD_EXPR_CHECK(NODE) \
  TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)

/* True if NODE is UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR */
#define FOLD_EXPR_P(NODE)				\
  (TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR		\
   || TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR		\
   || TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR		\
   || TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR)

/* True when NODE is a fold over a compound assignment operator. */
#define FOLD_EXPR_MODIFY_P(NODE) \
  TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE))

/* An INTEGER_CST containing the tree code of the folded operator. */
#define FOLD_EXPR_OP(NODE) \
  TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0)

/* The expression containing an unexpanded parameter pack. */
#define FOLD_EXPR_PACK(NODE) \
  TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1)

/* In a binary fold expression, the argument with no unexpanded parameter
   packs. */
#define FOLD_EXPR_INIT(NODE) \
  TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2)

/* In a FUNCTION_DECL, the saved language-specific per-function data.  */
#define DECL_SAVED_FUNCTION_DATA(NODE)			\
  (LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE))	\
   ->u.saved_language_function)

/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference.  */
#define REFERENCE_REF_P(NODE)				\
  (INDIRECT_REF_P (NODE)				\
   && TREE_TYPE (TREE_OPERAND (NODE, 0))		\
   && (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0)))	\
       == REFERENCE_TYPE))

/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
   reference to VLA type, because it's used for VLA capture.  */
#define REFERENCE_VLA_OK(NODE) \
  (TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))

/* Per the USE_GLOBAL/USE_VEC naming these record, respectively, whether
   the scope operator (::new / ::delete) and whether the array form
   (delete[]) was written -- NOTE(review): inferred from the macro names;
   confirm against the setters in the parser.  */
#define NEW_EXPR_USE_GLOBAL(NODE) \
  TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
  TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_VEC(NODE) \
  TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))

#define CALL_OR_AGGR_INIT_CHECK(NODE) \
  TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR)

/* Indicates that this is a non-dependent COMPOUND_EXPR which will
   resolve to a function call.  */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
  TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))

/* In a CALL_EXPR appearing in a template, true if Koenig lookup
   should be performed at instantiation time.  */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))

/* True if the arguments to NODE should be evaluated in left-to-right
   order regardless of PUSH_ARGS_REVERSED.  */
#define CALL_EXPR_ORDERED_ARGS(NODE) \
  TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE))

/* True if the arguments to NODE should be evaluated in right-to-left
   order regardless of PUSH_ARGS_REVERSED.  */
#define CALL_EXPR_REVERSE_ARGS(NODE) \
  TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE))

/* True if CALL_EXPR was written as an operator expression, not a function
   call.  */
#define CALL_EXPR_OPERATOR_SYNTAX(NODE) \
  TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE))

/* Indicates whether a string literal has been parenthesized. Such
   usages are disallowed in certain circumstances.  */
#define PAREN_STRING_LITERAL_P(NODE) \
  TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))

/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized,
   or an INDIRECT_REF comes from parenthesizing a _DECL.  Currently only
   set some of the time in C++14 mode.  */
#define REF_PARENTHESIZED_P(NODE) \
  TREE_LANG_FLAG_2 (TREE_CHECK3 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF))

/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
   constructor call, rather than an ordinary function call.  */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
  TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))

/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
   the object.  */
#define AGGR_INIT_ZERO_FIRST(NODE) \
  TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))

/* Nonzero means that the call is the jump from a thunk to the
   thunked-to function.  */
#define AGGR_INIT_FROM_THUNK_P(NODE) \
  (AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag)

/* AGGR_INIT_EXPR accessors.  These are equivalent to the CALL_EXPR
   accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
   CALL_EXPR_STATIC_CHAIN).  */

#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
  TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
  TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)

/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
   We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
   the argument count is zero when checking is enabled.  Instead, do
   the pointer arithmetic to advance past the 3 fixed operands in a
   AGGR_INIT_EXPR.  That produces a valid pointer to just past the end of
   the operand array, even if it's not valid to dereference it.  */
#define AGGR_INIT_EXPR_ARGP(NODE) \
  (&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)

/* Abstract iterators for AGGR_INIT_EXPRs.
*/ /* Structure containing iterator state. */ struct aggr_init_expr_arg_iterator { tree t; /* the aggr_init_expr */ int n; /* argument count */ int i; /* next argument index */ }; /* Initialize the abstract argument list iterator object ITER with the arguments from AGGR_INIT_EXPR node EXP. */ inline void init_aggr_init_expr_arg_iterator (tree exp, aggr_init_expr_arg_iterator *iter) { iter->t = exp; iter->n = aggr_init_expr_nargs (exp); iter->i = 0; } /* Return the next argument from abstract argument list iterator object ITER, and advance its state. Return NULL_TREE if there are no more arguments. */ inline tree next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter) { tree result; if (iter->i >= iter->n) return NULL_TREE; result = AGGR_INIT_EXPR_ARG (iter->t, iter->i); iter->i++; return result; } /* Initialize the abstract argument list iterator object ITER, then advance past and return the first argument. Useful in for expressions, e.g. for (arg = first_aggr_init_expr_arg (exp, &iter); arg; arg = next_aggr_init_expr_arg (&iter)) */ inline tree first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter) { init_aggr_init_expr_arg_iterator (exp, iter); return next_aggr_init_expr_arg (iter); } /* Test whether there are more arguments in abstract argument list iterator ITER, without changing its state. */ inline bool more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter) { return (iter->i < iter->n); } /* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */ #define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \ for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \ (arg) = next_aggr_init_expr_arg (&(iter))) /* VEC_INIT_EXPR accessors. 
*/
/* Operand 0: the slot (the object being initialized).  */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
/* Operand 1: the initializer.  */
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)

/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
   Only set when the current function is constexpr.  */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
  TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))

/* Indicates that a VEC_INIT_EXPR is expressing value-initialization.  */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
  TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))

/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
   exceptions.  NULL_TREE means 'true'.  */
#define MUST_NOT_THROW_COND(NODE) \
  TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)

/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
   TEMPLATE_DECL.  This macro determines whether or not a given class
   type is really a template type, as opposed to an instantiation or
   specialization of one.  */
#define CLASSTYPE_IS_TEMPLATE(NODE)  \
  (CLASSTYPE_TEMPLATE_INFO (NODE)    \
   && !CLASSTYPE_USE_TEMPLATE (NODE) \
   && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))

/* The name used by the user to name the typename type.  Typically,
   this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
   corresponding TYPE_DECL.  However, this may also be a
   TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'.  */
#define TYPENAME_TYPE_FULLNAME(NODE) \
  (TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))

/* True if a TYPENAME_TYPE was declared as an "enum".  */
#define TYPENAME_IS_ENUM_P(NODE) \
  (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))

/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
   "union".  */
#define TYPENAME_IS_CLASS_P(NODE) \
  (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))

/* True if a TYPENAME_TYPE is in the process of being resolved.  */
#define TYPENAME_IS_RESOLVING_P(NODE) \
  (TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))

/* [class.virtual]

   A class that declares or inherits a virtual function is called a
   polymorphic class.  */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))

/* Nonzero if this class has a virtual function table pointer.  */
#define TYPE_CONTAINS_VPTR_P(NODE)		\
  (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))

/* This flag is true of a local VAR_DECL if it was declared in a for
   statement, but we are no longer in the scope of the for.  */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))

/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
   if we already emitted a warning about using it.  */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))

/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
   scope) declared in a local scope.  */
#define DECL_LOCAL_FUNCTION_P(NODE) \
  DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))

/* Nonzero if NODE is the target for genericization of 'break' stmts.  */
#define LABEL_DECL_BREAK(NODE) \
  DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))

/* Nonzero if NODE is the target for genericization of 'continue' stmts.  */
#define LABEL_DECL_CONTINUE(NODE) \
  DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))

/* Nonzero if NODE is the target for genericization of 'return' stmts
   in constructors/destructors of targetm.cxx.cdtor_returns_this targets.  */
#define LABEL_DECL_CDTOR(NODE) \
  DECL_LANG_FLAG_2 (LABEL_DECL_CHECK (NODE))

/* True if NODE was declared with auto in its return type, but it has
   started compilation and so the return type might have been changed by
   return type deduction; its declared return type should be found in
   DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern.  */
#define FNDECL_USED_AUTO(NODE) \
  TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))

/* Nonzero if NODE is a DECL which we know about but which has not
   been explicitly declared, such as a built-in function or a friend
   declared inside a class.  In the latter case DECL_HIDDEN_FRIEND_P
   will be set.  */
#define DECL_ANTICIPATED(NODE) \
  (DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
   ->u.base.anticipated_p)

/* Is DECL NODE a hidden name?  */
#define DECL_HIDDEN_P(NODE) \
  (DECL_LANG_SPECIFIC (NODE) && TYPE_FUNCTION_OR_TEMPLATE_DECL_P (NODE) \
   && DECL_ANTICIPATED (NODE))

/* True if this is a hidden class type.  */
#define TYPE_HIDDEN_P(NODE) \
  (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
   && DECL_ANTICIPATED (TYPE_NAME (NODE)))

/* True for artificial decls added for OpenMP privatized non-static
   data members.  */
#define DECL_OMP_PRIVATIZED_MEMBER(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p)

/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
   within a class but has not been declared in the surrounding scope.
   The function is invisible except via argument dependent lookup.  */
#define DECL_HIDDEN_FRIEND_P(NODE) \
  (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)

/* Nonzero if NODE is an artificial FUNCTION_DECL for
   #pragma omp declare reduction.  */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
  (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)

/* Nonzero if DECL has been declared threadprivate by
   #pragma omp threadprivate.  */
#define CP_DECL_THREADPRIVATE_P(DECL) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)

/* Nonzero if NODE is a VAR_DECL which has been declared inline.  */
#define DECL_VAR_DECLARED_INLINE_P(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))			\
   ? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p	\
   : false)
#define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \
   = true)

/* True if NODE is a constant variable with a value-dependent initializer.  */
#define DECL_DEPENDENT_INIT_P(NODE)				\
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))			\
   && DECL_LANG_SPECIFIC (NODE)->u.base.dependent_init_p)
#define SET_DECL_DEPENDENT_INIT_P(NODE, X) \
  (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.dependent_init_p = (X))

/* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding
   declaration or one of VAR_DECLs for the user identifiers in it.  */
#define DECL_DECOMPOSITION_P(NODE) \
  (VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE)			\
   ? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp	\
   : false)

/* The underlying artificial VAR_DECL for structured binding.  */
#define DECL_DECOMP_BASE(NODE) \
  (LANG_DECL_DECOMP_CHECK (NODE)->base)

/* Nonzero if NODE is an inline VAR_DECL.  In C++17, static data members
   declared with constexpr specifier are implicitly inline variables.  */
#define DECL_INLINE_VAR_P(NODE) \
  (DECL_VAR_DECLARED_INLINE_P (NODE)		\
   || (cxx_dialect >= cxx17			\
       && DECL_DECLARED_CONSTEXPR_P (NODE)	\
       && DECL_CLASS_SCOPE_P (NODE)))

/* Nonzero if DECL was declared with '= delete'.  */
#define DECL_DELETED_FN(DECL) \
  (LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)

/* Nonzero if DECL was declared with '= default' (maybe implicitly).  */
#define DECL_DEFAULTED_FN(DECL) \
  (LANG_DECL_FN_CHECK (DECL)->defaulted_p)

/* Nonzero if DECL is explicitly defaulted in the class body.  */
#define DECL_DEFAULTED_IN_CLASS_P(DECL)					\
  (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))

/* Nonzero if DECL was defaulted outside the class body.  */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL)				\
  (DECL_DEFAULTED_FN (DECL)						\
   && !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))

/* Record whether a typedef for type `int' was actually `signed int'.  */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)

/* Returns nonzero if DECL has external linkage, as specified by the
   language standard.  (This predicate may hold even when the
   corresponding entity is not actually given external linkage in the
   object file; see decl_linkage for details.)  */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
  (decl_linkage (DECL) == lk_external)

/* Keep these codes in ascending code order.  */
#define INTEGRAL_CODE_P(CODE)	\
  ((CODE) == ENUMERAL_TYPE	\
   || (CODE) == BOOLEAN_TYPE	\
   || (CODE) == INTEGER_TYPE)

/* [basic.fundamental]

   Types bool, char, wchar_t, and the signed and unsigned integer
   types are collectively called integral types.

   Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
   types as well, which is incorrect in C++.  Keep these checks in
   ascending code order.  */
#define CP_INTEGRAL_TYPE_P(TYPE)	\
  (TREE_CODE (TYPE) == BOOLEAN_TYPE	\
   || TREE_CODE (TYPE) == INTEGER_TYPE)

/* Returns true if TYPE is an integral or enumeration name.  Keep
   these checks in ascending code order.  */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))

/* Returns true if TYPE is an integral or unscoped enumeration type.  */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
  (UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))

/* True if the class type TYPE is a literal type.  */
#define CLASSTYPE_LITERAL_P(TYPE) \
  (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)

/* [basic.fundamental]

   Integral and floating types are collectively called arithmetic
   types.

   As a GNU extension, we also accept complex types.

   Keep these checks in ascending code order.
*/
#define ARITHMETIC_TYPE_P(TYPE)			\
  (CP_INTEGRAL_TYPE_P (TYPE)			\
   || TREE_CODE (TYPE) == REAL_TYPE		\
   || TREE_CODE (TYPE) == COMPLEX_TYPE)

/* True iff TYPE is cv decltype(nullptr).  */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)

/* [basic.types]

   Arithmetic types, enumeration types, pointer types,
   pointer-to-member types, and std::nullptr_t are collectively called
   scalar types.

   Keep these checks in ascending code order.  */
#define SCALAR_TYPE_P(TYPE)			\
  (TYPE_PTRDATAMEM_P (TYPE)			\
   || TREE_CODE (TYPE) == ENUMERAL_TYPE		\
   || ARITHMETIC_TYPE_P (TYPE)			\
   || TYPE_PTR_P (TYPE)				\
   || TYPE_PTRMEMFUNC_P (TYPE)			\
   || NULLPTR_TYPE_P (TYPE))

/* Determines whether this type is a C++0x scoped enumeration
   type. Scoped enumerations types are introduced via "enum class" or
   "enum struct", e.g.,

     enum class Color { Red, Green, Blue };

   Scoped enumeration types are different from normal (unscoped)
   enumeration types in several ways:

     - The enumerators of a scoped enumeration type are only available
       within the scope of the enumeration type and not in the
       enclosing scope.  For example, the Red color can be referred to
       with "Color::Red" but not "Red".

     - Scoped enumerators and enumerations do not implicitly convert
       to integers or 'bool'.

     - The underlying type of the enum is well-defined.  */
#define SCOPED_ENUM_P(TYPE) \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))

/* Determine whether this is an unscoped enumeration type.  */
#define UNSCOPED_ENUM_P(TYPE) \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))

/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
   enumeration type (1) or a normal (unscoped) enumeration type (0).  */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
  (ENUM_IS_SCOPED (TYPE) = (VAL))

#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
  (ENUM_IS_OPAQUE (TYPE) = (VAL))

#define OPAQUE_ENUM_P(TYPE) \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))

/* Determines whether an ENUMERAL_TYPE has an explicit
   underlying type.  */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))

/* Returns the underlying type of the given enumeration type. The
   underlying type is determined in different ways, depending on the
   properties of the enum:

     - In C++0x, the underlying type can be explicitly specified, e.g.,

         enum E1 : char { ... } // underlying type is char

     - In a C++0x scoped enumeration, the underlying type is int
       unless otherwise specified:

         enum class E2 { ... } // underlying type is int

     - Otherwise, the underlying type is determined based on the
       values of the enumerators.  In this case, the
       ENUM_UNDERLYING_TYPE will not be set until after the definition
       of the enumeration is completed by finish_enum.  */
#define ENUM_UNDERLYING_TYPE(TYPE) \
  TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))

/* [dcl.init.aggr]

   An aggregate is an array or a class with no user-provided
   constructors, no brace-or-equal-initializers for non-static data
   members, no private or protected non-static data members, no base
   classes, and no virtual functions.

   As an extension, we also treat vectors as aggregates.  Keep these
   checks in ascending code order.  */
#define CP_AGGREGATE_TYPE_P(TYPE)				\
  (TREE_CODE (TYPE) == VECTOR_TYPE				\
   || TREE_CODE (TYPE) == ARRAY_TYPE				\
   || (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))

/* Nonzero for a class type means that the class type has a
   user-declared constructor.  */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))

/* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a
   late-specified return type.  */
#define TYPE_HAS_LATE_RETURN_TYPE(NODE) \
  (TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE)))

/* When appearing in an INDIRECT_REF, it means that the tree structure
   underneath is actually a call to a constructor.  This is needed
   when the constructor must initialize local storage (which can be
   automatically destroyed), rather than allowing it to allocate space
   from the heap.

   When appearing in a SAVE_EXPR, it means that underneath is a call
   to a constructor.
   When appearing in a CONSTRUCTOR, the expression is a compound
   literal.

   When appearing in a FIELD_DECL, it means that this field has been
   duly initialized in its constructor.  */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))

/* True if NODE is a brace-enclosed initializer.  */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
  (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)

/* True if NODE is a compound-literal, i.e., a brace-enclosed
   initializer cast to a particular type.  */
#define COMPOUND_LITERAL_P(NODE) \
  (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))

#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
				   && vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
				   && !TREE_HAS_CONSTRUCTOR (NODE))

/* True if NODE is a init-list used as a direct-initializer, i.e.
   B b{1,2}, not B b({1,2}) or B b = {1,2}.  */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))

/* True if an uninitialized element in NODE should not be treated as
   implicitly value-initialized.  Only used in constexpr evaluation.  */
#define CONSTRUCTOR_NO_IMPLICIT_ZERO(NODE) \
  (TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE)))

/* True if this CONSTRUCTOR should not be used as a variable initializer
   because it was loaded from a constexpr variable with mutable fields.  */
#define CONSTRUCTOR_MUTABLE_POISON(NODE) \
  (TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE)))

/* True if this typed CONSTRUCTOR represents C99 compound-literal syntax
   rather than C++11 functional cast syntax.  */
#define CONSTRUCTOR_C99_COMPOUND_LITERAL(NODE) \
  (TREE_LANG_FLAG_3 (CONSTRUCTOR_CHECK (NODE)))

/* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the
   CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with
   CONSTRUCTOR_PLACEHOLDER_BOUNDARY.  */
#define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \
  (TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE)))

#define DIRECT_LIST_INIT_P(NODE) \
  (BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))

/* True if NODE represents a conversion for direct-initialization in a
   template.  Set by perform_implicit_conversion_flags.  */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
  (TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))

/* True if NODE represents a dependent conversion of a non-type template
   argument.  Set by maybe_convert_nontype_argument.  */
#define IMPLICIT_CONV_EXPR_NONTYPE_ARG(NODE) \
  (TREE_LANG_FLAG_1 (IMPLICIT_CONV_EXPR_CHECK (NODE)))

/* Nonzero means that an object of this type cannot be initialized using
   an initializer list.  */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
  (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))

/* Nonzero if there is a non-trivial X::op=(cv X&) for this class.  */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)

/* Nonzero if there is a non-trivial X::X(cv X&) for this class.  */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)

/* Nonzero if there is a non-trivial X::op=(X&&) for this class.  */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)

/* Nonzero if there is a non-trivial X::X(X&&) for this class.  */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)

/* Nonzero if there is no trivial default constructor for this class.  */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)

/* Nonzero if TYPE has a trivial destructor.  From [class.dtor]:

     A destructor is trivial if it is an implicitly declared
     destructor and if:

       - all of the direct base classes of its class have trivial
	 destructors,

       - for all of the non-static data members of its class that are
	 of class type (or array thereof), each such class has a
	 trivial destructor.  */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
  (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))

/* Nonzero for _TYPE node means that this type does not have a trivial
   destructor.  Therefore, destroying an object of this type will
   involve a call to a destructor.  This can apply to objects of
   ARRAY_TYPE if the type of the elements needs a destructor.  */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
  (TYPE_LANG_FLAG_4 (NODE))

/* Nonzero for class type means that the default constructor is trivial.  */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
  (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))

/* Nonzero for class type means that copy initialization of this type can use
   a bitwise copy.  */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
  (TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))

/* Nonzero for class type means that assignment of this type can use
   a bitwise copy.  */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
  (TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))

/* Returns true if NODE is a pointer-to-data-member.  */
#define TYPE_PTRDATAMEM_P(NODE)			\
  (TREE_CODE (NODE) == OFFSET_TYPE)

/* Returns true if NODE is a pointer.  */
#define TYPE_PTR_P(NODE)			\
  (TREE_CODE (NODE) == POINTER_TYPE)

/* Returns true if NODE is an object type:

     [basic.types]

     An object type is a (possibly cv-qualified) type that is not a
     function type, not a reference type, and not a void type.

   Keep these checks in ascending order, for speed.  */
#define TYPE_OBJ_P(NODE)			\
  (TREE_CODE (NODE) != REFERENCE_TYPE		\
   && !VOID_TYPE_P (NODE)			\
   && TREE_CODE (NODE) != FUNCTION_TYPE		\
   && TREE_CODE (NODE) != METHOD_TYPE)

/* Returns true if NODE is a pointer to an object.
   Keep these checks in ascending tree code order.  */
#define TYPE_PTROB_P(NODE)					\
  (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))

/* Returns true if NODE is a reference to an object.  Keep these checks
   in ascending tree code order.  */
#define TYPE_REF_OBJ_P(NODE)					\
  (TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))

/* Returns true if NODE is a pointer to an object, or a pointer to
   void.  Keep these checks in ascending tree code order.  */
#define TYPE_PTROBV_P(NODE)					\
  (TYPE_PTR_P (NODE)						\
   && !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE		\
	|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))

/* Returns true if NODE is a pointer to function type.  */
#define TYPE_PTRFN_P(NODE)				\
  (TYPE_PTR_P (NODE)					\
   && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)

/* Returns true if NODE is a reference to function type.  */
#define TYPE_REFFN_P(NODE)				\
  (TREE_CODE (NODE) == REFERENCE_TYPE			\
   && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)

/* Returns true if NODE is a pointer to member function type.  */
#define TYPE_PTRMEMFUNC_P(NODE)		\
  (TREE_CODE (NODE) == RECORD_TYPE	\
   && TYPE_PTRMEMFUNC_FLAG (NODE))

#define TYPE_PTRMEMFUNC_FLAG(NODE) \
  (TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE)))

/* Returns true if NODE is a pointer-to-member.  */
#define TYPE_PTRMEM_P(NODE) \
  (TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))

/* Returns true if NODE is a pointer or a pointer-to-member.  */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
  (TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))

/* Indicates when overload resolution may resolve to a pointer to
   member function.  [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
  TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))

/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
   pointer to member function.  TYPE_PTRMEMFUNC_P _must_ be true,
   before using this macro.  */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
  (cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\
			    cp_type_quals (NODE)))

/* As above, but can be used in places that want an lvalue at the expense
   of not necessarily having the correct cv-qualifiers.  */
#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \
  (TREE_TYPE (TYPE_FIELDS (NODE)))

/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
  TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))

/* The canonical internal RECORD_TYPE from the POINTER_TYPE to
   METHOD_TYPE.  */
#define TYPE_PTRMEMFUNC_TYPE(NODE) \
  TYPE_LANG_SLOT_1 (NODE)

/* For a pointer-to-member type of the form `T X::*', this is `X'.
   For a type like `void (X::*)() const', this type is `X', not `const
   X'.  To get at the `const X' you have to look at the
   TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
   type `const X*'.  */
#define TYPE_PTRMEM_CLASS_TYPE(NODE)			\
  (TYPE_PTRDATAMEM_P (NODE)				\
   ? TYPE_OFFSET_BASETYPE (NODE)			\
   : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))

/* For a pointer-to-member type of the form `T X::*', this is `T'.  */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE)		\
  (TYPE_PTRDATAMEM_P (NODE)				\
   ? TREE_TYPE (NODE)					\
   : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))

/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
   `X'.  */
#define PTRMEM_CST_CLASS(NODE) \
  TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))

/* For a pointer-to-member constant `X::Y' this is the _DECL for
   `Y'.  */
#define PTRMEM_CST_MEMBER(NODE) \
  (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)

/* The expression in question for a TYPEOF_TYPE.  */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))

/* The type in question for an UNDERLYING_TYPE.  */
#define UNDERLYING_TYPE_TYPE(NODE) \
  (TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))

/* The type in question for BASES.
*/ #define BASES_TYPE(NODE) \ (TYPE_VALUES_RAW (BASES_CHECK (NODE))) #define BASES_DIRECT(NODE) \ TREE_LANG_FLAG_0 (BASES_CHECK (NODE)) /* The expression in question for a DECLTYPE_TYPE. */ #define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE))) /* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an id-expression or a member-access expression. When false, it was parsed as a full expression. */ #define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \ (DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag /* These flags indicate that we want different semantics from normal decltype: lambda capture just drops references, init capture uses auto semantics, lambda proxies look through implicit dereference. */ #define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \ TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_INIT_CAPTURE(NODE) \ TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \ TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_REF_CAPTURE(NODE) \ TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_EXTERN(NODE) \ DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_STATIC(NODE) \ DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a lambda capture field for an array of runtime bound. */ #define DECL_VLA_CAPTURE_P(NODE) \ DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE)) /* Nonzero for PARM_DECL node means that this is an array function parameter, i.e, a[] rather than *a. 
*/ #define DECL_ARRAY_PARAMETER_P(NODE) \ DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE)) /* Nonzero for a FIELD_DECL whose NSDMI is currently being instantiated. */ #define DECL_INSTANTIATING_NSDMI_P(NODE) \ DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a base class of the parent object, as opposed to a member field. */ #define DECL_FIELD_IS_BASE(NODE) \ DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a simple (no explicit initializer) lambda capture field, making it invisible to name lookup in unevaluated contexts. */ #define DECL_NORMAL_CAPTURE_P(NODE) \ DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE)) /* Nonzero if TYPE is an anonymous union or struct type. We have to use a flag for this because "A union for which objects or pointers are declared is not an anonymous union" [class.union]. */ #define ANON_AGGR_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr) #define SET_ANON_AGGR_TYPE_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1) /* Nonzero if TYPE is an anonymous union type. */ #define ANON_UNION_TYPE_P(NODE) \ (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE)) /* Define fields and accessors for nodes representing declared names. */ /* Nonzero if TYPE is an unnamed class with a typedef for linkage purposes. */ #define TYPE_WAS_UNNAMED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous) /* C++: all of these are overloaded! These apply only to TYPE_DECLs. */ /* The format of each node in the DECL_FRIENDLIST is as follows: The TREE_PURPOSE will be the name of a function, i.e., an IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose TREE_VALUEs are friends with the given name. */ #define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE)) #define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST)) #define FRIEND_DECLS(LIST) (TREE_VALUE (LIST)) /* The DECL_ACCESS, if non-NULL, is a TREE_LIST. 
The TREE_PURPOSE of each node is a type; the TREE_VALUE is the access granted for this DECL in that type. The DECL_ACCESS is set by access declarations. For example, if a member that would normally be public in a derived class is made protected, then the derived class and the protected_access_node will appear in the DECL_ACCESS for the node. */ #define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access) /* Nonzero if the FUNCTION_DECL is a global constructor. */ #define DECL_GLOBAL_CTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->global_ctor_p) /* Nonzero if the FUNCTION_DECL is a global destructor. */ #define DECL_GLOBAL_DTOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->global_dtor_p) /* Accessor macros for C++ template decl nodes. */ /* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node is an INT_CST whose TREE_INT_CST_LOW indicates the level of the template parameters, with 1 being the outermost set of template parameters. The TREE_VALUE is a vector, whose elements are the template parameters at each level. Each element in the vector is a TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a non-type parameter), or a TYPE_DECL (if the parameter is a type parameter). The TREE_PURPOSE is the default value, if any. The TEMPLATE_PARM_INDEX for the parameter is available as the DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a TYPE_DECL). FIXME: CONST_CAST_TREE is a hack that hopefully will go away after tree is converted to C++ class hierarchy. */ #define DECL_TEMPLATE_PARMS(NODE) \ ((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments #define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \ INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE)) #define DECL_NTPARMS(NODE) \ TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE)) /* For function, method, class-data templates. FIXME: CONST_CAST_TREE is a hack that hopefully will go away after tree is converted to C++ class hierarchy. 
*/ #define DECL_TEMPLATE_RESULT(NODE) \ ((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result /* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS lists all instantiations and specializations of the function so that tsubst_friend_function can reassign them to another template if we find that the namespace-scope template is really a partial instantiation of a friend template. For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds all instantiations and specializations of the class type, including partial instantiations and partial specializations, so that if we explicitly specialize a partial instantiation we can walk the list in maybe_process_partial_specialization and reassign them or complain as appropriate. In both cases, the TREE_PURPOSE of each node contains the arguments used; the TREE_VALUE contains the generated variable. The template arguments are always complete. For example, given: template <class T> struct S1 { template <class U> struct S2 {}; template <class U> struct S2<U*> {}; }; the record for the partial specialization will contain, as its argument list, { {T}, {U*} }, and will be on the DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template <class U> struct S1<T>::S2'. This list is not used for other templates. */ #define DECL_TEMPLATE_INSTANTIATIONS(NODE) \ DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE)) /* For a class template, this list contains the partial specializations of this template. (Full specializations are not recorded on this list.) The TREE_PURPOSE holds the arguments used in the partial specialization (e.g., for `template <class T> struct S<T*, int>' this will be `T*, int'.) The arguments will also include any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL for the partial specialization. The TREE_TYPE is the _TYPE node for the partial specialization. This list is not used for other templates. 
*/ #define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \ DECL_SIZE (TEMPLATE_DECL_CHECK (NODE)) /* Nonzero for a DECL which is actually a template parameter. Keep these checks in ascending tree code order. */ #define DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) \ && (TREE_CODE (NODE) == CONST_DECL \ || TREE_CODE (NODE) == PARM_DECL \ || TREE_CODE (NODE) == TYPE_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL)) /* Nonzero for a raw template parameter node. */ #define TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_TYPE_PARM \ || TREE_CODE (NODE) == TEMPLATE_TEMPLATE_PARM \ || TREE_CODE (NODE) == TEMPLATE_PARM_INDEX) /* Mark NODE as a template parameter. */ #define SET_DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) = 1) /* Nonzero if NODE is a template template parameter. */ #define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE)) /* Nonzero for a DECL that represents a function template. */ #define DECL_FUNCTION_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL) /* Nonzero for a DECL that represents a class template or alias template. */ #define DECL_TYPE_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL) /* Nonzero for a DECL that represents a class template. */ #define DECL_CLASS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a TEMPLATE_DECL that represents an alias template. */ #define DECL_ALIAS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a NODE which declares a type. */ #define DECL_DECLARES_TYPE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE)) /* Nonzero if NODE declares a function. 
*/ #define DECL_DECLARES_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE)) /* Nonzero if NODE is the typedef implicitly generated for a type when the type is declared. In C++, `struct S {};' is roughly equivalent to `struct S {}; typedef struct S S;' in C. DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this example. In C++, there is a second implicit typedef for each class, called the injected-class-name, in the scope of `S' itself, so that you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */ #define DECL_IMPLICIT_TYPEDEF_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE)) #define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \ (DECL_LANG_FLAG_2 (NODE) = 1) #define DECL_SELF_REFERENCE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE)) #define SET_DECL_SELF_REFERENCE_P(NODE) \ (DECL_LANG_FLAG_4 (NODE) = 1) /* A `primary' template is one that has its own template header and is not a partial specialization. A member function of a class template is a template, but not primary. A member template is primary. Friend templates are primary, too. */ /* Returns the primary template corresponding to these parameters. */ #define DECL_PRIMARY_TEMPLATE(NODE) \ (TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE))) /* Returns nonzero if NODE is a primary template. */ #define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE)) /* Nonzero iff NODE is a specialization of a template. The value indicates the type of specializations: 1=implicit instantiation 2=partial or explicit specialization, e.g.: template <> int min<int> (int, int), 3=explicit instantiation, e.g.: template int min<int> (int, int); Note that NODE will be marked as a specialization even if the template it is instantiating is not a primary template. For example, given: template <typename T> struct O { void f(); struct I {}; }; both O<int>::f and O<int>::I will be marked as instantiations. 
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also be non-NULL. */ #define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template) /* Like DECL_USE_TEMPLATE, but for class types. */ #define CLASSTYPE_USE_TEMPLATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->use_template) /* True if NODE is a specialization of a primary template. */ #define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \ (CLASS_TYPE_P (NODE) \ && CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) #define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1) #define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) & 1) #define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2) #define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2) /* Returns true for an explicit or partial specialization of a class template. */ #define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 2) #define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 2) #define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1) #define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1) #define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 1) #define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 1) #define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3) #define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3) #define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 3) #define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 3) /* Nonzero if DECL is a friend function which is an instantiation from the point of view of the compiler, but not from the point of view of the language. 
For example given: template <class T> struct S { friend void f(T) {}; }; the declaration of `void f(int)' generated when S<int> is instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */ #define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \ (DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \ && !DECL_USE_TEMPLATE (DECL)) /* Nonzero if DECL is a function generated from a function 'temploid', i.e. template, member of class template, or dependent friend. */ #define DECL_TEMPLOID_INSTANTIATION(DECL) \ (DECL_TEMPLATE_INSTANTIATION (DECL) \ || DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL)) /* Nonzero if DECL is either defined implicitly by the compiler or generated from a temploid. */ #define DECL_GENERATED_P(DECL) \ (DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL)) /* Nonzero iff we are currently processing a declaration for an entity with its own template parameter list, and which is not a full specialization. */ #define PROCESSING_REAL_TEMPLATE_DECL_P() \ (!processing_template_parmlist \ && processing_template_decl > template_class_depth (current_scope ())) /* Nonzero if this VAR_DECL or FUNCTION_DECL has already been instantiated, i.e. its definition has been generated from the pattern given in the template. */ #define DECL_TEMPLATE_INSTANTIATED(NODE) \ DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE)) /* We know what we're doing with this decl now. */ #define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE) /* DECL_EXTERNAL must be set on a decl until the decl is actually emitted, so that assemble_external will work properly. So we have this flag to tell us whether the decl is really not external. This flag does not indicate whether or not the decl is defined in the current translation unit; it indicates whether or not we should emit the decl at the end of compilation if it is defined and needed. 
*/ #define DECL_NOT_REALLY_EXTERN(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern) #define DECL_REALLY_EXTERN(NODE) \ (DECL_EXTERNAL (NODE) \ && (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE))) /* A thunk is a stub function. A thunk is an alternate entry point for an ordinary FUNCTION_DECL. The address of the ordinary FUNCTION_DECL is given by the DECL_INITIAL, which is always an ADDR_EXPR whose operand is a FUNCTION_DECL. The job of the thunk is to either adjust the this pointer before transferring control to the FUNCTION_DECL, or call FUNCTION_DECL and then adjust the result value. Note, the result pointer adjusting thunk must perform a call to the thunked function, (or be implemented via passing some invisible parameter to the thunked function, which is modified to perform the adjustment just before returning). A thunk may perform either, or both, of the following operations: o Adjust the this or result pointer by a constant offset. o Adjust the this or result pointer by looking up a vcall or vbase offset in the vtable. A this pointer adjusting thunk converts from a base to a derived class, and hence adds the offsets. A result pointer adjusting thunk converts from a derived class to a base, and hence subtracts the offsets. If both operations are performed, then the constant adjustment is performed first for this pointer adjustment and last for the result pointer adjustment. The constant adjustment is given by THUNK_FIXED_OFFSET. If the vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is used. For this pointer adjusting thunks, it is the vcall offset into the vtable. For result pointer adjusting thunks it is the binfo of the virtual base to convert to. Use that binfo's vbase offset. It is possible to have equivalent covariant thunks. These are distinct virtual covariant thunks whose vbase offsets happen to have the same value. 
THUNK_ALIAS is used to pick one as the canonical thunk, which will get all the this pointer adjusting thunks attached to it. */ /* An integer indicating how many bytes should be subtracted from the this or result pointer when this function is called. */ #define THUNK_FIXED_OFFSET(DECL) \ (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset) /* A tree indicating how to perform the virtual adjustment. For a this adjusting thunk it is the number of bytes to be added to the vtable to find the vcall offset. For a result adjusting thunk, it is the binfo of the relevant virtual base. If NULL, then there is no virtual adjust. (The vptr is always located at offset zero from the this or result pointer.) (If the covariant type is within the class hierarchy being laid out, the vbase index is not yet known at the point we need to create the thunks, hence the need to use binfos.) */ #define THUNK_VIRTUAL_OFFSET(DECL) \ (LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access) /* A thunk which is equivalent to another thunk. */ #define THUNK_ALIAS(DECL) \ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info) /* For thunk NODE, this is the FUNCTION_DECL thunked to. It is possible for the target to be a thunk too. */ #define THUNK_TARGET(NODE) \ (LANG_DECL_FN_CHECK (NODE)->befriending_classes) /* True for a SCOPE_REF iff the "template" keyword was used to indicate that the qualified name denotes a template. */ #define QUALIFIED_NAME_IS_TEMPLATE(NODE) \ (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE))) /* True for an OMP_ATOMIC that has dependent parameters. These are stored as an expr in operand 1, and integer_zero_node in operand 0. */ #define OMP_ATOMIC_DEPENDENT_P(NODE) \ (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST) /* Used while gimplifying continue statements bound to OMP_FOR nodes. 
*/ #define OMP_FOR_GIMPLIFYING_P(NODE) \ (TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE))) /* A language-specific token attached to the OpenMP data clauses to hold code (or code fragments) related to ctors, dtors, and op=. See semantics.c for details. */ #define CP_OMP_CLAUSE_INFO(NODE) \ TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \ OMP_CLAUSE_LINEAR)) /* Nonzero if this transaction expression's body contains statements. */ #define TRANSACTION_EXPR_IS_STMT(NODE) \ TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE)) /* These macros provide convenient access to the various _STMT nodes created when parsing template declarations. */ #define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0) #define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1) #define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0) #define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1) #define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0) /* Nonzero if this try block is a function try block. */ #define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE)) #define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0) #define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1) #define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE)) /* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run and the VAR_DECL for which this cleanup exists. */ #define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0) #define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1) #define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2) /* IF_STMT accessors. These give access to the condition of the if statement, the then block of the if statement, and the else block of the if statement if it exists. 
*/ #define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0) #define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1) #define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2) #define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3) #define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE)) /* Like PACK_EXPANSION_EXTRA_ARGS, for constexpr if. IF_SCOPE is used while building an IF_STMT; IF_STMT_EXTRA_ARGS is used after it is complete. */ #define IF_STMT_EXTRA_ARGS(NODE) IF_SCOPE (NODE) /* WHILE_STMT accessors. These give access to the condition of the while statement and the body of the while statement, respectively. */ #define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0) #define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1) /* DO_STMT accessors. These give access to the condition of the do statement and the body of the do statement, respectively. */ #define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0) #define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1) /* FOR_STMT accessors. These give access to the init statement, condition, update expression, and body of the for statement, respectively. */ #define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0) #define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1) #define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2) #define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3) #define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4) /* RANGE_FOR_STMT accessors. These give access to the declarator, expression, body, and scope of the statement, respectively. 
*/ #define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0) #define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1) #define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2) #define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3) #define RANGE_FOR_UNROLL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4) #define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE)) #define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0) #define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1) #define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2) #define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3) /* True if there are case labels for all possible values of switch cond, either because there is a default: case label or because the case label ranges cover all values. */ #define SWITCH_STMT_ALL_CASES_P(NODE) \ TREE_LANG_FLAG_0 (SWITCH_STMT_CHECK (NODE)) /* True if the body of a switch stmt contains no BREAK_STMTs. */ #define SWITCH_STMT_NO_BREAK_P(NODE) \ TREE_LANG_FLAG_2 (SWITCH_STMT_CHECK (NODE)) /* STMT_EXPR accessor. */ #define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0) /* EXPR_STMT accessor. This gives the expression associated with an expression statement. */ #define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0) /* True if this TARGET_EXPR was created by build_cplus_new, and so we can discard it if it isn't useful. */ #define TARGET_EXPR_IMPLICIT_P(NODE) \ TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE)) /* True if this TARGET_EXPR is the result of list-initialization of a temporary. */ #define TARGET_EXPR_LIST_INIT_P(NODE) \ TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE)) /* True if this TARGET_EXPR expresses direct-initialization of an object to be named later. 
*/ #define TARGET_EXPR_DIRECT_INIT_P(NODE) \ TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE)) /* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if the initializer has void type, it's doing something more complicated. */ #define SIMPLE_TARGET_EXPR_P(NODE) \ (TREE_CODE (NODE) == TARGET_EXPR \ && !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE)))) /* True if EXPR expresses direct-initialization of a TYPE. */ #define DIRECT_INIT_EXPR_P(TYPE,EXPR) \ (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \ && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR))) /* True if this CONVERT_EXPR is for a conversion to virtual base in an NSDMI, and should be re-evaluated when used in a constructor. */ #define CONVERT_EXPR_VBASE_PATH(NODE) \ TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE)) /* True if SIZEOF_EXPR argument is type. */ #define SIZEOF_EXPR_TYPE_P(NODE) \ TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE)) /* True if the ALIGNOF_EXPR was spelled "alignof". */ #define ALIGNOF_EXPR_STD_P(NODE) \ TREE_LANG_FLAG_0 (ALIGNOF_EXPR_CHECK (NODE)) /* An enumeration of the kind of tags that C++ accepts. */ enum tag_types { none_type = 0, /* Not a tag type. */ record_type, /* "struct" types. */ class_type, /* "class" types. */ union_type, /* "union" types. */ enum_type, /* "enum" types. */ typename_type, /* "typename" types. */ scope_type /* namespace or tagged type name followed by :: */ }; /* The various kinds of lvalues we distinguish. */ enum cp_lvalue_kind_flags { clk_none = 0, /* Things that are not an lvalue. */ clk_ordinary = 1, /* An ordinary lvalue. */ clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */ clk_class = 4, /* A prvalue of class or array type. */ clk_bitfield = 8, /* An lvalue for a bit-field. */ clk_packed = 16 /* An lvalue for a packed field. */ }; /* This type is used for parameters and variables which hold combinations of the flags in enum cp_lvalue_kind_flags. 
*/ typedef int cp_lvalue_kind; /* Various kinds of template specialization, instantiation, etc. */ enum tmpl_spec_kind { tsk_none, /* Not a template at all. */ tsk_invalid_member_spec, /* An explicit member template specialization, but the enclosing classes have not all been explicitly specialized. */ tsk_invalid_expl_inst, /* An explicit instantiation containing template parameter lists. */ tsk_excessive_parms, /* A template declaration with too many template parameter lists. */ tsk_insufficient_parms, /* A template declaration with too few parameter lists. */ tsk_template, /* A template declaration. */ tsk_expl_spec, /* An explicit specialization. */ tsk_expl_inst /* An explicit instantiation. */ }; /* The various kinds of access. BINFO_ACCESS depends on these being two bit quantities. The numerical values are important; they are used to initialize RTTI data structures, so changing them changes the ABI. */ enum access_kind { ak_none = 0, /* Inaccessible. */ ak_public = 1, /* Accessible, as a `public' thing. */ ak_protected = 2, /* Accessible, as a `protected' thing. */ ak_private = 3 /* Accessible, as a `private' thing. */ }; /* The various kinds of special functions. If you add to this list, you should update special_function_p as well. */ enum special_function_kind { sfk_none = 0, /* Not a special function. This enumeral must have value zero; see special_function_p. */ sfk_constructor, /* A constructor. */ sfk_copy_constructor, /* A copy constructor. */ sfk_move_constructor, /* A move constructor. */ sfk_copy_assignment, /* A copy assignment operator. */ sfk_move_assignment, /* A move assignment operator. */ sfk_destructor, /* A destructor. */ sfk_complete_destructor, /* A destructor for complete objects. */ sfk_base_destructor, /* A destructor for base subobjects. */ sfk_deleting_destructor, /* A destructor for complete objects that deletes the object after it has been destroyed. */ sfk_conversion, /* A conversion operator. 
*/ sfk_deduction_guide, /* A class template deduction guide. */ sfk_inheriting_constructor /* An inheriting constructor */ }; /* The various kinds of linkage. From [basic.link], A name is said to have linkage when it might denote the same object, reference, function, type, template, namespace or value as a name introduced in another scope: -- When a name has external linkage, the entity it denotes can be referred to from scopes of other translation units or from other scopes of the same translation unit. -- When a name has internal linkage, the entity it denotes can be referred to by names from other scopes in the same translation unit. -- When a name has no linkage, the entity it denotes cannot be referred to by names from other scopes. */ enum linkage_kind { lk_none, /* No linkage. */ lk_internal, /* Internal linkage. */ lk_external /* External linkage. */ }; enum duration_kind { dk_static, dk_thread, dk_auto, dk_dynamic }; /* Bitmask flags to control type substitution. */ enum tsubst_flags { tf_none = 0, /* nothing special */ tf_error = 1 << 0, /* give error messages */ tf_warning = 1 << 1, /* give warnings too */ tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */ tf_keep_type_decl = 1 << 3, /* retain typedef type decls (make_typename_type use) */ tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal instantiate_type use) */ tf_user = 1 << 5, /* found template must be a user template (lookup_template_class use) */ tf_conv = 1 << 6, /* We are determining what kind of conversion might be permissible, not actually performing the conversion. */ tf_decltype = 1 << 7, /* We are the operand of decltype. Used to implement the special rules for calls in decltype (5.2.2/11). */ tf_partial = 1 << 8, /* Doing initial explicit argument substitution in fn_type_unification. */ tf_fndecl_type = 1 << 9, /* Substituting the type of a function declaration. 
*/ tf_no_cleanup = 1 << 10, /* Do not build a cleanup (build_target_expr and friends) */ /* Convenient substitution flags combinations. */ tf_warning_or_error = tf_warning | tf_error }; /* This type is used for parameters and variables which hold combinations of the flags in enum tsubst_flags. */ typedef int tsubst_flags_t; /* The kind of checking we can do looking in a class hierarchy. */ enum base_access_flags { ba_any = 0, /* Do not check access, allow an ambiguous base, prefer a non-virtual base */ ba_unique = 1 << 0, /* Must be a unique base. */ ba_check_bit = 1 << 1, /* Check access. */ ba_check = ba_unique | ba_check_bit, ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */ }; /* This type is used for parameters and variables which hold combinations of the flags in enum base_access_flags. */ typedef int base_access; /* The various kinds of access check during parsing. */ enum deferring_kind { dk_no_deferred = 0, /* Check access immediately */ dk_deferred = 1, /* Deferred check */ dk_no_check = 2 /* No access check */ }; /* The kind of base we can find, looking in a class hierarchy. Values <0 indicate we failed. */ enum base_kind { bk_inaccessible = -3, /* The base is inaccessible */ bk_ambig = -2, /* The base is ambiguous */ bk_not_base = -1, /* It is not a base */ bk_same_type = 0, /* It is the same type */ bk_proper_base = 1, /* It is a proper base */ bk_via_virtual = 2 /* It is a proper base, but via a virtual path. This might not be the canonical binfo. */ }; /* Node for "pointer to (virtual) function". This may be distinct from ptr_type_node so gdb can distinguish them. */ #define vfunc_ptr_type_node vtable_entry_type /* For building calls to `delete'. */ extern GTY(()) tree integer_two_node; /* The number of function bodies which we are currently processing. (Zero if we are at namespace scope, one inside the body of a function, two inside the body of a function in a local class, etc.) 
*/ extern int function_depth; /* Nonzero if we are inside eq_specializations, which affects comparison of PARM_DECLs in cp_tree_equal. */ extern int comparing_specializations; /* In parser.c. */ /* Nonzero if we are parsing an unevaluated operand: an operand to sizeof, typeof, or alignof. This is a count since operands to sizeof can be nested. */ extern int cp_unevaluated_operand; /* RAII class used to inhibit the evaluation of operands during parsing and template instantiation. Evaluation warnings are also inhibited. */ struct cp_unevaluated { cp_unevaluated (); ~cp_unevaluated (); }; /* in pt.c */ /* These values are used for the `STRICT' parameter to type_unification and fn_type_unification. Their meanings are described with the documentation for fn_type_unification. */ enum unification_kind_t { DEDUCE_CALL, DEDUCE_CONV, DEDUCE_EXACT }; // An RAII class used to create a new pointer map for local // specializations. When the stack goes out of scope, the // previous pointer map is restored. enum lss_policy { lss_blank, lss_copy }; struct local_specialization_stack { local_specialization_stack (lss_policy = lss_blank); ~local_specialization_stack (); hash_map<tree, tree> *saved; }; /* in class.c */ extern int current_class_depth; /* An array of all local classes present in this translation unit, in declaration order. */ extern GTY(()) vec<tree, va_gc> *local_classes; /* in decl.c */ /* An array of static vars & fns. */ extern GTY(()) vec<tree, va_gc> *static_decls; /* An array of vtable-needing types that have no key function, or have an emitted key function. */ extern GTY(()) vec<tree, va_gc> *keyed_classes; /* Here's where we control how name mangling takes place. */ /* Cannot use '$' up front, because this confuses gdb (names beginning with '$' are gdb-local identifiers). 
   Note that all forms in which the '$' is significant are long enough
   for direct indexing (meaning that if we know there is a '$'
   at a particular location, we can index into the string at
   any other location that provides distinguishing characters).  */

/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
   doesn't allow '.' in symbol names.  */
#ifndef NO_DOT_IN_LABEL

#define JOINER '.'

#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"

#else /* NO_DOT_IN_LABEL */

#ifndef NO_DOLLAR_IN_LABEL

#define JOINER '$'

#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"

#else /* NO_DOLLAR_IN_LABEL */

#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
	     sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
	     sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
	     sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"

#endif	/* NO_DOLLAR_IN_LABEL */
#endif	/* NO_DOT_IN_LABEL */

#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"

/* User-defined literal operators: "operator\"\"" prefix (source form)
   and "li" prefix (mangled form).  */
#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), \
	     UDLIT_OP_ANSI_PREFIX, \
	     sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
  (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)

#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)

#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
  && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
  && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)

#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))

#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */

/* Nonzero if we're done parsing and into end-of-file activities.
   Two if we're done with front-end processing.  */

extern int at_eof;

/* True if note_mangling_alias should enqueue mangling aliases for
   later generation, rather than emitting them right away.  */

extern bool defer_mangling_aliases;

/* True if noexcept is part of the type (i.e. in C++17).  */

extern bool flag_noexcept_type;

/* A list of namespace-scope objects which have constructors or
   destructors which reside in the global scope.  The decl is stored
   in the TREE_VALUE slot and the initializer is stored in the
   TREE_PURPOSE slot.  */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage.  */
extern GTY(()) tree tls_aggregates;

enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };

/* These are used as bits in flags passed to various functions to
   control their behavior.  Despite the LOOKUP_ prefix, many of these
   do not control name lookup.  ??? Functions using these flags should
   probably be modified to accept explicit boolean flags for the
   behaviors relevant to them.  */
/* Check for access violations.  */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
   should be called directly.  */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried.  This flag
   indicates that we are not performing direct-initialization.
*/ #define LOOKUP_ONLYCONVERTING (1 << 2) #define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING) /* If a temporary is created, it should be created so that it lives as long as the current variable bindings; otherwise it only lives until the end of the complete-expression. It also forces direct-initialization in cases where other parts of the compiler have already generated a temporary, such as reference initialization and the catch parameter. */ #define DIRECT_BIND (1 << 3) /* We're performing a user-defined conversion, so more user-defined conversions are not permitted (only built-in conversions). */ #define LOOKUP_NO_CONVERSION (1 << 4) /* The user has explicitly called a destructor. (Therefore, we do not need to check that the object is non-NULL before calling the destructor.) */ #define LOOKUP_DESTRUCTOR (1 << 5) /* Do not permit references to bind to temporaries. */ #define LOOKUP_NO_TEMP_BIND (1 << 6) /* Do not accept objects, and possibly namespaces. */ #define LOOKUP_PREFER_TYPES (1 << 7) /* Do not accept objects, and possibly types. */ #define LOOKUP_PREFER_NAMESPACES (1 << 8) /* Accept types or namespaces. */ #define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES) /* Return friend declarations and un-declared builtin functions. (Normally, these entities are registered in the symbol table, but not found by lookup.) */ #define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1) /* We're trying to treat an lvalue as an rvalue. */ #define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1) /* We're inside an init-list, so narrowing conversions are ill-formed. */ #define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1) /* We're looking up a constructor for list-initialization. */ #define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1) /* This is the first parameter of a copy constructor. */ #define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1) /* We only want to consider list constructors. 
*/ #define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1) /* Return after determining which function to call and checking access. Used by sythesized_method_walk to determine which functions will be called to initialize subobjects, in order to determine exception specification and possible implicit delete. This is kind of a hack, but exiting early avoids problems with trying to perform argument conversions when the class isn't complete yet. */ #define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1) /* Used by calls from defaulted functions to limit the overload set to avoid cycles trying to declare them (core issue 1092). */ #define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1) /* Used in calls to store_init_value to suppress its usual call to digest_init. */ #define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1) /* An instantiation with explicit template arguments. */ #define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1) /* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */ #define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1) /* Used by case_conversion to disregard non-integral conversions. */ #define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1) /* Used for delegating constructors in order to diagnose self-delegation. */ #define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1) #define LOOKUP_NAMESPACES_ONLY(F) \ (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES)) #define LOOKUP_TYPES_ONLY(F) \ (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES)) #define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH) /* These flags are used by the conversion code. CONV_IMPLICIT : Perform implicit conversions (standard and user-defined). CONV_STATIC : Perform the explicit conversions for static_cast. CONV_CONST : Perform the explicit conversions for const_cast. CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast. CONV_PRIVATE : Perform upcasts to private bases. 
CONV_FORCE_TEMP : Require a new temporary when converting to the same aggregate type. */ #define CONV_IMPLICIT 1 #define CONV_STATIC 2 #define CONV_CONST 4 #define CONV_REINTERPRET 8 #define CONV_PRIVATE 16 /* #define CONV_NONCONVERTING 32 */ #define CONV_FORCE_TEMP 64 #define CONV_FOLD 128 #define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ | CONV_REINTERPRET) #define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP) #define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD) /* Used by build_expr_type_conversion to indicate which types are acceptable as arguments to the expression under consideration. */ #define WANT_INT 1 /* integer types, including bool */ #define WANT_FLOAT 2 /* floating point types */ #define WANT_ENUM 4 /* enumerated types */ #define WANT_POINTER 8 /* pointer types */ #define WANT_NULL 16 /* null pointer constant */ #define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */ #define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX) /* Used with comptypes, and related functions, to guide type comparison. */ #define COMPARE_STRICT 0 /* Just check if the types are the same. */ #define COMPARE_BASE 1 /* Check to see if the second type is derived from the first. */ #define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in reverse. */ #define COMPARE_REDECLARATION 4 /* The comparison is being done when another declaration of an existing entity is seen. */ #define COMPARE_STRUCTURAL 8 /* The comparison is intended to be structural. The actual comparison will be identical to COMPARE_STRICT. */ /* Used with start function. */ #define SF_DEFAULT 0 /* No flags. */ #define SF_PRE_PARSED 1 /* The function declaration has already been parsed. */ #define SF_INCLASS_INLINE 2 /* The function is an inline, defined in the class body. */ /* Used with start_decl's initialized parameter. 
*/ #define SD_UNINITIALIZED 0 #define SD_INITIALIZED 1 #define SD_DEFAULTED 2 #define SD_DELETED 3 /* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2 is derived from TYPE1, or if TYPE2 is a pointer (reference) to a class derived from the type pointed to (referred to) by TYPE1. */ #define same_or_base_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_BASE) /* These macros are used to access a TEMPLATE_PARM_INDEX. */ #define TEMPLATE_PARM_INDEX_CAST(NODE) \ ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE)) #define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index) #define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level) #define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE)) #define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level) #define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl) #define TEMPLATE_PARM_PARAMETER_PACK(NODE) \ (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE))) /* These macros are for accessing the fields of TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */ #define TEMPLATE_TYPE_PARM_INDEX(NODE) \ (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \ TEMPLATE_TEMPLATE_PARM, \ BOUND_TEMPLATE_TEMPLATE_PARM))) #define TEMPLATE_TYPE_IDX(NODE) \ (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_LEVEL(NODE) \ (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \ (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_DECL(NODE) \ (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \ (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE))) /* For a C++17 class deduction placeholder, the template it represents. 
*/ #define CLASS_PLACEHOLDER_TEMPLATE(NODE) \ (DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE)))) /* Contexts in which auto deduction occurs. These flags are used to control diagnostics in do_auto_deduction. */ enum auto_deduction_context { adc_unspecified, /* Not given */ adc_variable_type, /* Variable initializer deduction */ adc_return_type, /* Return type deduction */ adc_unify, /* Template argument deduction */ adc_requirement, /* Argument deduction constraint */ adc_decomp_type /* Decomposition declaration initializer deduction */ }; /* True if this type-parameter belongs to a class template, used by C++17 class template argument deduction. */ #define TEMPLATE_TYPE_PARM_FOR_CLASS(NODE) \ (TREE_LANG_FLAG_0 (TEMPLATE_TYPE_PARM_CHECK (NODE))) /* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */ #define AUTO_IS_DECLTYPE(NODE) \ (TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE))) /* These constants can used as bit flags in the process of tree formatting. TFF_PLAIN_IDENTIFIER: unqualified part of a name. TFF_SCOPE: include the class and namespace scope of the name. TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name. TFF_DECL_SPECIFIERS: print decl-specifiers. TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with a class-key (resp. `enum'). TFF_RETURN_TYPE: include function return type. TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values. TFF_EXCEPTION_SPECIFICATION: show function exception specification. TFF_TEMPLATE_HEADER: show the template<...> header in a template-declaration. TFF_TEMPLATE_NAME: show only template-name. TFF_EXPR_IN_PARENS: parenthesize expressions. TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments. TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the top-level entity. TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments identical to their defaults. 
   TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
       arguments for a function template specialization.
   TFF_POINTER: we are printing a pointer type.  */

#define TFF_PLAIN_IDENTIFIER			(0)
#define TFF_SCOPE				(1)
#define TFF_CHASE_TYPEDEF			(1 << 1)
#define TFF_DECL_SPECIFIERS			(1 << 2)
#define TFF_CLASS_KEY_OR_ENUM			(1 << 3)
#define TFF_RETURN_TYPE				(1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS		(1 << 5)
#define TFF_EXCEPTION_SPECIFICATION		(1 << 6)
#define TFF_TEMPLATE_HEADER			(1 << 7)
#define TFF_TEMPLATE_NAME			(1 << 8)
#define TFF_EXPR_IN_PARENS			(1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS		(1 << 10)
#define TFF_UNQUALIFIED_NAME			(1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS	(1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS		(1 << 13)
#define TFF_POINTER		                (1 << 14)

/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
   node.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE)	\
  ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM)	\
   ? TYPE_TI_TEMPLATE (NODE)				\
   : TYPE_NAME (NODE))

/* in lex.c  */

extern void init_reswords (void);

/* Various flags for the overloaded operator information.  */
enum ovl_op_flags {
  OVL_OP_FLAG_NONE = 0,		/* Don't care.  */
  OVL_OP_FLAG_UNARY = 1,	/* Is unary.  */
  OVL_OP_FLAG_BINARY = 2,	/* Is binary.  */
  OVL_OP_FLAG_AMBIARY = 3,	/* May be unary or binary.  */
  OVL_OP_FLAG_ALLOC = 4,	/* operator new or delete.  */
  OVL_OP_FLAG_DELETE = 1,	/* operator delete.  */
  OVL_OP_FLAG_VEC = 2		/* vector new or delete.  */
};

/* Compressed operator codes.  Order is determined by operators.def
   and does not match that of tree_codes.  */
enum ovl_op_code {
  OVL_OP_ERROR_MARK,
  OVL_OP_NOP_EXPR,
#define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) OVL_OP_##CODE,
#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) /* NOTHING */
#include "operators.def"
  OVL_OP_MAX
};

/* Per-operator bookkeeping: identifier, printable name, mangling,
   tree code and compressed code.  */
struct GTY(()) ovl_op_info_t {
  /* The IDENTIFIER_NODE for the operator.  */
  tree identifier;
  /* The name of the operator.  */
  const char *name;
  /* The mangled name of the operator.  */
  const char *mangled_name;
  /* The (regular) tree code.  */
  enum tree_code tree_code : 16;
  /* The (compressed) operator code.  */
  enum ovl_op_code ovl_op_code : 8;
  /* The ovl_op_flags of the operator */
  unsigned flags : 8;
};

/* Overloaded operator info indexed by ass_op_p & ovl_op_code.  */
extern GTY(()) ovl_op_info_t ovl_op_info[2][OVL_OP_MAX];
/* Mapping from tree_codes to ovl_op_codes.  */
extern GTY(()) unsigned char ovl_op_mapping[MAX_TREE_CODES];
/* Mapping for ambi-ary operators from the binary to the unary.  */
extern GTY(()) unsigned char ovl_op_alternate[OVL_OP_MAX];

/* Given an ass_op_p boolean and a tree code, return a pointer to its
   overloaded operator info.  Tree codes for non-overloaded operators
   map to the error-operator.  */
#define OVL_OP_INFO(IS_ASS_P, TREE_CODE)			\
  (&ovl_op_info[(IS_ASS_P) != 0][ovl_op_mapping[(TREE_CODE)]])
/* Overloaded operator info for an identifier for which
   IDENTIFIER_OVL_OP_P is true.  */
#define IDENTIFIER_OVL_OP_INFO(NODE) \
  (&ovl_op_info[IDENTIFIER_KIND_BIT_0 (NODE)][IDENTIFIER_CP_INDEX (NODE)])
#define IDENTIFIER_OVL_OP_FLAGS(NODE) \
  (IDENTIFIER_OVL_OP_INFO (NODE)->flags)

/* A type-qualifier, or bitmask therefore, using the TYPE_QUAL
   constants.  */

typedef int cp_cv_quals;

/* Non-static member functions have an optional virt-specifier-seq.
   There is a VIRT_SPEC value for each virt-specifier.
   They can be combined by bitwise-or to form the complete set of
   virt-specifiers for a member function.  */
enum virt_specifier
  {
    VIRT_SPEC_UNSPECIFIED = 0x0,
    VIRT_SPEC_FINAL       = 0x1,
    VIRT_SPEC_OVERRIDE    = 0x2
  };

/* A type-qualifier, or bitmask therefore, using the VIRT_SPEC
   constants.  */

typedef int cp_virt_specifiers;

/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:

   [dcl.fct]
   The return type, the parameter-type-list, the ref-qualifier, and
   the cv-qualifier-seq, but not the default arguments or the exception
   specification, are part of the function type.
   REF_QUAL_NONE    Ordinary member function with no ref-qualifier
   REF_QUAL_LVALUE  Member function with the &-ref-qualifier
   REF_QUAL_RVALUE  Member function with the &&-ref-qualifier */

enum cp_ref_qualifier {
  REF_QUAL_NONE = 0,
  REF_QUAL_LVALUE = 1,
  REF_QUAL_RVALUE = 2
};

/* A storage class.  */

enum cp_storage_class {
  /* sc_none must be zero so that zeroing a cp_decl_specifier_seq
     sets the storage_class field to sc_none.  */
  sc_none = 0,
  sc_auto,
  sc_register,
  sc_static,
  sc_extern,
  sc_mutable
};

/* An individual decl-specifier.  This is used to index the array of
   locations for the declspecs in struct cp_decl_specifier_seq
   below.  */

enum cp_decl_spec {
  ds_first,
  ds_signed = ds_first,
  ds_unsigned,
  ds_short,
  ds_long,
  ds_const,
  ds_volatile,
  ds_restrict,
  ds_inline,
  ds_virtual,
  ds_explicit,
  ds_friend,
  ds_typedef,
  ds_alias,
  ds_constexpr,
  ds_complex,
  ds_thread,
  ds_type_spec,
  ds_redefined_builtin_type_spec,
  ds_attribute,
  ds_std_attribute,
  ds_storage_class,
  ds_long_long,
  ds_concept,
  ds_last /* This enumerator must always be the last one.  */
};

/* A decl-specifier-seq.  */

struct cp_decl_specifier_seq {
  /* An array of locations for the declaration specifiers, indexed by
     enum cp_decl_spec_word.  */
  source_location locations[ds_last];
  /* The primary type, if any, given by the decl-specifier-seq.
     Modifiers, like "short", "const", and "unsigned" are not
     reflected here.  This field will be a TYPE, unless a typedef-name
     was used, in which case it will be a TYPE_DECL.  */
  tree type;
  /* The attributes, if any, provided with the specifier sequence.  */
  tree attributes;
  /* The c++11 attributes that follow the type specifier.  */
  tree std_attributes;
  /* If non-NULL, a built-in type that the user attempted to redefine
     to some other type.  */
  tree redefined_builtin_type;
  /* The storage class specified -- or sc_none if no storage class was
     explicitly specified.  */
  cp_storage_class storage_class;
  /* For the __intN declspec, this stores the index into the int_n_* arrays.  */
  int int_n_idx;
  /* True iff TYPE_SPEC defines a class or enum.  */
  BOOL_BITFIELD type_definition_p : 1;
  /* True iff multiple types were (erroneously) specified for this
     decl-specifier-seq.  */
  BOOL_BITFIELD multiple_types_p : 1;
  /* True iff multiple storage classes were (erroneously) specified
     for this decl-specifier-seq or a combination of a storage class
     with a typedef specifier.  */
  BOOL_BITFIELD conflicting_specifiers_p : 1;
  /* True iff at least one decl-specifier was found.  */
  BOOL_BITFIELD any_specifiers_p : 1;
  /* True iff at least one type-specifier was found.  */
  BOOL_BITFIELD any_type_specifiers_p : 1;
  /* True iff "int" was explicitly provided.  */
  BOOL_BITFIELD explicit_int_p : 1;
  /* True iff "__intN" was explicitly provided.  */
  BOOL_BITFIELD explicit_intN_p : 1;
  /* True iff "char" was explicitly provided.  */
  BOOL_BITFIELD explicit_char_p : 1;
  /* True iff ds_thread is set for __thread, not thread_local.  */
  BOOL_BITFIELD gnu_thread_keyword_p : 1;
  /* True iff the type is a decltype.  */
  BOOL_BITFIELD decltype_p : 1;
};

/* The various kinds of declarators.  */

enum cp_declarator_kind {
  cdk_id,
  cdk_function,
  cdk_array,
  cdk_pointer,
  cdk_reference,
  cdk_ptrmem,
  cdk_decomp,
  cdk_error
};

/* A declarator.  */

typedef struct cp_declarator cp_declarator;

typedef struct cp_parameter_declarator cp_parameter_declarator;

/* A parameter, before it has been semantically analyzed.  */
struct cp_parameter_declarator {
  /* The next parameter, or NULL_TREE if none.  */
  cp_parameter_declarator *next;
  /* The decl-specifiers-seq for the parameter.  */
  cp_decl_specifier_seq decl_specifiers;
  /* The declarator for the parameter.  */
  cp_declarator *declarator;
  /* The default-argument expression, or NULL_TREE, if none.  */
  tree default_argument;
  /* True iff this is a template parameter pack.  */
  bool template_parameter_pack_p;
  /* Location within source.  */
  location_t loc;
};

/* A declarator.  */
struct cp_declarator {
  /* The kind of declarator.  */
  ENUM_BITFIELD (cp_declarator_kind) kind : 4;
  /* Whether we parsed an ellipsis (`...') just before the declarator,
     to indicate this is a parameter pack.  */
  BOOL_BITFIELD parameter_pack_p : 1;
  /* If this declarator is parenthesized, this the open-paren.  It is
     UNKNOWN_LOCATION when not parenthesized.  */
  location_t parenthesized;

  location_t id_loc; /* Currently only set for cdk_id, cdk_decomp and
			cdk_function.  */
  /* GNU Attributes that apply to this declarator.  If the declarator
     is a pointer or a reference, these attribute apply to the type
     pointed to.  */
  tree attributes;
  /* Standard C++11 attributes that apply to this declarator.  If the
     declarator is a pointer or a reference, these attributes apply
     to the pointer, rather than to the type pointed to.  */
  tree std_attributes;
  /* For all but cdk_id, cdk_decomp and cdk_error, the contained
     declarator.  For cdk_id, cdk_decomp and cdk_error, guaranteed to
     be NULL.  */
  cp_declarator *declarator;
  union {
    /* For identifiers.  */
    struct {
      /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
	 *_TYPE) for this identifier.  */
      tree qualifying_scope;
      /* The unqualified name of the entity -- an IDENTIFIER_NODE,
	 BIT_NOT_EXPR, or TEMPLATE_ID_EXPR.  */
      tree unqualified_name;
      /* If this is the name of a function, what kind of special
	 function (if any).  */
      special_function_kind sfk;
    } id;
    /* For functions.  */
    struct {
      /* The parameters to the function as a TREE_LIST of decl/default.  */
      tree parameters;
      /* The cv-qualifiers for the function.  */
      cp_cv_quals qualifiers;
      /* The virt-specifiers for the function.  */
      cp_virt_specifiers virt_specifiers;
      /* The ref-qualifier for the function.  */
      cp_ref_qualifier ref_qualifier;
      /* The transaction-safety qualifier for the function.  */
      tree tx_qualifier;
      /* The exception-specification for the function.  */
      tree exception_specification;
      /* The late-specified return type, if any.  */
      tree late_return_type;
      /* The trailing requires-clause, if any.  */
      tree requires_clause;
    } function;
    /* For arrays.  */
    struct {
      /* The bounds to the array.  */
      tree bounds;
    } array;
    /* For cdk_pointer and cdk_ptrmem.  */
    struct {
      /* The cv-qualifiers for the pointer.  */
      cp_cv_quals qualifiers;
      /* For cdk_ptrmem, the class type containing the member.  */
      tree class_type;
    } pointer;
    /* For cdk_reference */
    struct {
      /* The cv-qualifiers for the reference.  These qualifiers are
	 only used to diagnose ill-formed code.  */
      cp_cv_quals qualifiers;
      /* Whether this is an rvalue reference */
      bool rvalue_ref;
    } reference;
  } u;
};

/* A level of template instantiation.  */
struct GTY((chain_next ("%h.next"))) tinst_level {
  /* The immediately deeper level in the chain.  */
  struct tinst_level *next;

  /* The original node.  TLDCL can be a DECL (for a function or static
     data member), a TYPE (for a class), depending on what we were
     asked to instantiate, or a TREE_LIST with the template as PURPOSE
     and the template args as VALUE, if we are substituting for
     overload resolution.  In all these cases, TARGS is NULL.
     However, to avoid creating TREE_LIST objects for substitutions if
     we can help, we store PURPOSE and VALUE in TLDCL and TARGS,
     respectively.  So TLDCL stands for TREE_LIST or DECL (the
     template is a DECL too), whereas TARGS stands for the template
     arguments.  */
  tree tldcl, targs;

 private:
  /* Return TRUE iff the original node is a split list.  */
  bool split_list_p () const { return targs; }

  /* Return TRUE iff the original node is a TREE_LIST object.  */
  bool tree_list_p () const
  {
    return !split_list_p () && TREE_CODE (tldcl) == TREE_LIST;
  }

  /* Return TRUE iff the original node is not a list, split or not.  */
  bool not_list_p () const
  {
    return !split_list_p () && !tree_list_p ();
  }

  /* Convert (in place) the original node from a split list to a
     TREE_LIST.  */
  tree to_list ();

 public:
  /* Release storage for OBJ and node, if it's a TREE_LIST.  */
  static void free (tinst_level *obj);

  /* Return TRUE iff the original node is a list, split or not.  */
  bool list_p () const { return !not_list_p (); }

  /* Return the original node; if it's a split list, make it a
     TREE_LIST first, so that it can be returned as a single tree
     object.  */
  tree get_node () {
    if (!split_list_p ()) return tldcl;
    else return to_list ();
  }

  /* Return the original node if it's a DECL or a TREE_LIST, but do
     NOT convert a split list to a TREE_LIST: return NULL instead.  */
  tree maybe_get_node () const {
    if (!split_list_p ()) return tldcl;
    else return NULL_TREE;
  }

  /* The location where the template is instantiated.  */
  location_t locus;

  /* errorcount + sorrycount when we pushed this level.  */
  unsigned short errors;

  /* Count references to this object.  If refcount reaches
     refcount_infinity value, we don't increment or decrement the
     refcount anymore, as the refcount isn't accurate anymore.
     The object can be still garbage collected if unreferenced from
     anywhere, which might keep referenced objects referenced longer than
     otherwise necessary.  Hitting the infinity is rare though.  */
  unsigned short refcount;

  /* Infinity value for the above refcount.  */
  static const unsigned short refcount_infinity = (unsigned short) ~0;
};

bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);

/* Return the type of the `this' parameter of FNTYPE.  */

inline tree
type_of_this_parm (const_tree fntype)
{
  function_args_iterator iter;
  gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
  function_args_iter_init (&iter, fntype);
  return function_args_iter_cond (&iter);
}

/* Return the class of the `this' parameter of FNTYPE.  */

inline tree
class_of_this_parm (const_tree fntype)
{
  return TREE_TYPE (type_of_this_parm (fntype));
}

/* True iff T is a variable template declaration. */
inline bool
variable_template_p (tree t)
{
  if (TREE_CODE (t) != TEMPLATE_DECL)
    return false;
  if (!PRIMARY_TEMPLATE_P (t))
    return false;
  if (tree r = DECL_TEMPLATE_RESULT (t))
    return VAR_P (r);
  return false;
}

/* True iff T is a variable concept definition.
   That is, T is a variable template declared with the concept
   specifier.  */
inline bool
variable_concept_p (tree t)
{
  if (TREE_CODE (t) != TEMPLATE_DECL)
    return false;
  if (tree r = DECL_TEMPLATE_RESULT (t))
    return VAR_P (r) && DECL_DECLARED_CONCEPT_P (r);
  return false;
}

/* True iff T is a concept definition.  That is, T is a variable or function
   template declared with the concept specifier.  */
inline bool
concept_template_p (tree t)
{
  if (TREE_CODE (t) != TEMPLATE_DECL)
    return false;
  if (tree r = DECL_TEMPLATE_RESULT (t))
    return VAR_OR_FUNCTION_DECL_P (r) && DECL_DECLARED_CONCEPT_P (r);
  return false;
}

/* A parameter list indicating for a function with no parameters,
   e.g  "int f(void)".  */
extern cp_parameter_declarator *no_parameters;

/* Various dump ids.  */
extern int class_dump_id;
extern int raw_dump_id;

/* in call.c */
extern bool check_dtor_name			(tree, tree);
int magic_varargs_p				(tree);

extern tree build_conditional_expr		(location_t, tree, tree, tree,
						 tsubst_flags_t);
extern tree build_addr_func			(tree, tsubst_flags_t);
extern void set_flags_from_callee		(tree);
extern tree build_call_a			(tree, int, tree*);
extern tree build_call_n			(tree, int, ...);
extern bool null_ptr_cst_p			(tree);
extern bool null_member_pointer_value_p		(tree);
extern bool sufficient_parms_p			(const_tree);
extern tree type_decays_to			(tree);
extern tree extract_call_expr			(tree);
extern tree build_user_type_conversion		(tree, tree, int,
						 tsubst_flags_t);
extern tree build_new_function_call		(tree, vec<tree, va_gc> **,
						 tsubst_flags_t);
extern tree build_operator_new_call		(tree, vec<tree, va_gc> **,
						 tree *, tree *, tree, tree,
						 tree *, tsubst_flags_t);
extern tree build_new_method_call		(tree, tree,
						 vec<tree, va_gc> **, tree,
						 int, tree *, tsubst_flags_t);
extern tree build_special_member_call		(tree, tree,
						 vec<tree, va_gc> **,
						 tree, int, tsubst_flags_t);
extern tree build_new_op			(location_t, enum tree_code,
						 int, tree, tree, tree, tree *,
						 tsubst_flags_t);
extern tree build_op_call			(tree, vec<tree, va_gc> **,
						 tsubst_flags_t);
extern bool aligned_allocation_fn_p		(tree);
extern bool usual_deallocation_fn_p		(tree);
extern tree build_op_delete_call		(enum tree_code, tree, tree,
						 bool, tree, tree,
						 tsubst_flags_t);
extern bool can_convert				(tree, tree, tsubst_flags_t);
extern bool can_convert_standard		(tree, tree, tsubst_flags_t);
extern bool can_convert_arg			(tree, tree, tree, int,
						 tsubst_flags_t);
extern bool can_convert_arg_bad			(tree, tree, tree, int,
						 tsubst_flags_t);
extern location_t get_fndecl_argument_location  (tree, int);

/* A class for recording information about access failures (e.g. private
   fields), so that we can potentially supply a fix-it hint about
   an accessor (from a context in which the constness of the object
   is known).  */

class access_failure_info
{
 public:
  access_failure_info () : m_was_inaccessible (false),
    m_basetype_path (NULL_TREE),
    m_field_decl (NULL_TREE) {}

  void record_access_failure (tree basetype_path, tree field_decl);
  void maybe_suggest_accessor (bool const_p) const;

 private:
  bool m_was_inaccessible;
  tree m_basetype_path;
  tree m_field_decl;
};

extern bool enforce_access			(tree, tree, tree,
						 tsubst_flags_t,
						 access_failure_info *afi = NULL);
extern void push_defarg_context			(tree);
extern void pop_defarg_context			(void);
extern tree convert_default_arg			(tree, tree, tree, int,
						 tsubst_flags_t);
extern tree convert_arg_to_ellipsis		(tree, tsubst_flags_t);
extern tree build_x_va_arg			(source_location, tree, tree);
extern tree cxx_type_promotes_to		(tree);
extern tree type_passed_as			(tree);
extern tree convert_for_arg_passing		(tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from		(tree, tree);
extern tree initialize_reference		(tree, tree, int,
						 tsubst_flags_t);
extern tree extend_ref_init_temps		(tree, tree,
						 vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp	(tree, tree);
extern bool type_has_extended_temps		(tree);
extern tree strip_top_quals			(tree);
extern bool reference_related_p			(tree, tree);
extern int remaining_arguments			(tree);
extern tree perform_implicit_conversion		(tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags	(tree, tree, tsubst_flags_t,
						 int);
extern tree build_converted_constant_expr	(tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
						       tsubst_flags_t);
extern tree in_charge_arg_for_name		(tree);
extern tree build_cxx_call			(tree, int, tree *,
						 tsubst_flags_t);
extern bool is_std_init_list			(tree);
extern bool is_list_ctor			(tree);
extern void validate_conversion_obstack		(void);
extern void mark_versions_used			(tree);
extern tree get_function_version_dispatcher	(tree);

/* in class.c */
extern tree build_vfield_ref			(tree, tree);
extern tree build_if_in_charge			(tree true_stmt,
						 tree false_stmt = void_node);
extern tree build_base_path			(enum tree_code, tree,
						 tree, int, tsubst_flags_t);
extern tree convert_to_base			(tree, tree, bool, bool,
						 tsubst_flags_t);
extern tree convert_to_base_statically		(tree, tree);
extern tree build_vtbl_ref			(tree, tree);
extern tree build_vfn_ref			(tree, tree);
extern tree get_vtable_decl			(tree, int);
extern bool add_method				(tree, tree, bool);
extern tree declared_access			(tree);
extern tree currently_open_class		(tree);
extern tree currently_open_derived_class	(tree);
extern tree outermost_open_class		(void);
extern tree current_nonlambda_class_type	(void);
extern tree finish_struct			(tree, tree);
extern void finish_struct_1			(tree);
extern int resolves_to_fixed_type_p		(tree, int *);
extern void init_class_processing		(void);
extern int is_empty_class			(tree);
extern bool is_really_empty_class		(tree);
extern void pushclass				(tree);
extern void popclass				(void);
extern void push_nested_class			(tree);
extern void pop_nested_class			(void);
extern int current_lang_depth			(void);
extern void push_lang_context			(tree);
extern void pop_lang_context			(void);
extern tree instantiate_type			(tree, tree, tsubst_flags_t);
extern void build_self_reference		(void);
extern int same_signature_p			(const_tree, const_tree);
extern void maybe_add_class_template_decl_list	(tree, tree, int);
extern void unreverse_member_declarations	(tree);
extern void invalidate_class_lookup_cache	(void);
extern void maybe_note_name_used_in_class	(tree, tree);
extern void note_name_declared_in_class		(tree, tree);
extern tree get_vtbl_decl_for_binfo		(tree);
extern bool vptr_via_virtual_p			(tree);
extern void debug_class				(tree);
extern void debug_thunks			(tree);
extern void set_linkage_according_to_type	(tree, tree);
extern void determine_key_method		(tree);
extern void check_for_override			(tree, tree);
extern void push_class_stack			(void);
extern void pop_class_stack			(void);
extern bool default_ctor_p			(tree);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p			(tree);
extern bool type_has_user_provided_constructor	(tree);
extern bool type_has_non_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part	(tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor	(tree);
extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared);
extern bool type_build_ctor_call		(tree);
extern bool type_build_dtor_call		(tree);
extern void explain_non_literal_class		(tree);
extern void inherit_targ_abi_tags		(tree);
extern void defaulted_late_check		(tree);
extern bool defaultable_fn_check		(tree);
extern void check_abi_tags			(tree);
extern tree missing_abi_tags			(tree);
extern void fixup_type_variants			(tree);
extern void fixup_attribute_variants		(tree);
extern tree* decl_cloned_function_p		(const_tree, bool);
extern void clone_function_decl			(tree, bool);
extern void adjust_clone_args			(tree);
extern void deduce_noexcept_on_destructor	(tree);
extern bool uniquely_derived_from_p		(tree, tree);
extern bool
publicly_uniquely_derived_p (tree, tree); extern tree common_enclosing_class (tree, tree); /* in cvt.c */ extern tree convert_to_reference (tree, tree, int, int, tree, tsubst_flags_t); extern tree convert_from_reference (tree); extern tree force_rvalue (tree, tsubst_flags_t); extern tree ocp_convert (tree, tree, int, int, tsubst_flags_t); extern tree cp_convert (tree, tree, tsubst_flags_t); extern tree cp_convert_and_check (tree, tree, tsubst_flags_t); extern tree cp_fold_convert (tree, tree); extern tree cp_get_callee (tree); extern tree cp_get_callee_fndecl (tree); extern tree cp_get_callee_fndecl_nofold (tree); extern tree cp_get_fndecl_from_callee (tree, bool fold = true); extern tree convert_to_void (tree, impl_conv_void, tsubst_flags_t); extern tree convert_force (tree, tree, int, tsubst_flags_t); extern tree build_expr_type_conversion (int, tree, bool); extern tree type_promotes_to (tree); extern bool can_convert_qual (tree, tree); extern tree perform_qualification_conversions (tree, tree); extern bool tx_safe_fn_type_p (tree); extern tree tx_unsafe_fn_variant (tree); extern bool fnptr_conv_p (tree, tree); extern tree strip_fnptr_conv (tree); /* in name-lookup.c */ extern void maybe_push_cleanup_level (tree); extern tree make_anon_name (void); extern tree check_for_out_of_scope_variable (tree); extern void dump (cp_binding_level &ref); extern void dump (cp_binding_level *ptr); extern void print_other_binding_stack (cp_binding_level *); extern tree maybe_push_decl (tree); extern tree current_decl_namespace (void); /* decl.c */ extern tree poplevel (int, int, int); extern void cxx_init_decl_processing (void); enum cp_tree_node_structure_enum cp_tree_node_structure (union lang_tree_node *); extern void finish_scope (void); extern void push_switch (tree); extern void pop_switch (void); extern void note_break_stmt (void); extern bool note_iteration_stmt_body_start (void); extern void note_iteration_stmt_body_end (bool); extern tree make_lambda_name (void); extern 
int decls_match (tree, tree, bool = true); extern bool maybe_version_functions (tree, tree, bool); extern tree duplicate_decls (tree, tree, bool); extern tree declare_local_label (tree); extern tree define_label (location_t, tree); extern void check_goto (tree); extern bool check_omp_return (void); extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t); extern tree build_typename_type (tree, tree, tree, tag_types); extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t); extern tree build_library_fn_ptr (const char *, tree, int); extern tree build_cp_library_fn_ptr (const char *, tree, int); extern tree push_library_fn (tree, tree, tree, int); extern tree push_void_library_fn (tree, tree, int); extern tree push_throw_library_fn (tree, tree); extern void warn_misplaced_attr_for_class_type (source_location location, tree class_type); extern tree check_tag_decl (cp_decl_specifier_seq *, bool); extern tree shadow_tag (cp_decl_specifier_seq *); extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool); extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *); extern void start_decl_1 (tree, bool); extern bool check_array_initializer (tree, tree, tree); extern void cp_finish_decl (tree, tree, bool, tree, int); extern tree lookup_decomp_type (tree); extern void cp_maybe_mangle_decomp (tree, tree, unsigned int); extern void cp_finish_decomp (tree, tree, unsigned int); extern int cp_complete_array_type (tree *, tree, bool); extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t); extern tree build_ptrmemfunc_type (tree); extern tree build_ptrmem_type (tree, tree); /* the grokdeclarator prototype is in decl.h */ extern tree build_this_parm (tree, tree, cp_cv_quals); extern tree grokparms (tree, tree *); extern int copy_fn_p (const_tree); extern bool move_fn_p (const_tree); extern bool move_signature_fn_p (const_tree); extern tree 
get_scope_of_declarator (const cp_declarator *); extern void grok_special_member_properties (tree); extern bool grok_ctor_properties (const_tree, const_tree); extern bool grok_op_properties (tree, bool); extern tree xref_tag (enum tag_types, tree, tag_scope, bool); extern tree xref_tag_from_type (tree, tree, tag_scope); extern void xref_basetypes (tree, tree); extern tree start_enum (tree, tree, tree, tree, bool, bool *); extern void finish_enum_value_list (tree); extern void finish_enum (tree); extern void build_enumerator (tree, tree, tree, tree, location_t); extern tree lookup_enumerator (tree, tree); extern bool start_preparsed_function (tree, tree, int); extern bool start_function (cp_decl_specifier_seq *, const cp_declarator *, tree); extern tree begin_function_body (void); extern void finish_function_body (tree); extern tree outer_curly_brace_block (tree); extern tree finish_function (bool); extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree); extern void maybe_register_incomplete_var (tree); extern void maybe_commonize_var (tree); extern void complete_vars (tree); extern tree static_fn_type (tree); extern void revert_static_member_fn (tree); extern void fixup_anonymous_aggr (tree); extern tree compute_array_index_type (tree, tree, tsubst_flags_t); extern tree check_default_argument (tree, tree, tsubst_flags_t); extern int wrapup_namespace_globals (); extern tree create_implicit_typedef (tree, tree); extern int local_variable_p (const_tree); extern tree register_dtor_fn (tree); extern tmpl_spec_kind current_tmpl_spec_kind (int); extern tree cp_fname_init (const char *, tree *); extern tree cxx_builtin_function (tree decl); extern tree cxx_builtin_function_ext_scope (tree decl); extern tree check_elaborated_type_specifier (enum tag_types, tree, bool); extern void warn_extern_redeclared_static (tree, tree); extern tree cxx_comdat_group (tree); extern bool cp_missing_noreturn_ok_p (tree); extern bool is_direct_enum_init (tree, tree); 
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *); extern tree check_var_type (tree, tree); extern tree reshape_init (tree, tree, tsubst_flags_t); extern tree next_initializable_field (tree); extern tree fndecl_declared_return_type (tree); extern bool undeduced_auto_decl (tree); extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error); extern tree finish_case_label (location_t, tree, tree); extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t); extern bool check_array_designated_initializer (constructor_elt *, unsigned HOST_WIDE_INT); extern bool check_for_uninitialized_const_var (tree, bool, tsubst_flags_t); /* in decl2.c */ extern void record_mangling (tree, bool); extern void overwrite_mangling (tree, tree); extern void note_mangling_alias (tree, tree); extern void generate_mangling_aliases (void); extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier); extern tree build_pointer_ptrmemfn_type (tree); extern tree change_return_type (tree, tree); extern void maybe_retrofit_in_chrg (tree); extern void maybe_make_one_only (tree); extern bool vague_linkage_p (tree); extern void grokclassfn (tree, tree, enum overload_flags); extern tree grok_array_decl (location_t, tree, tree, bool); extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t); extern tree check_classfn (tree, tree, tree); extern void check_member_template (tree); extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *, tree, bool, tree, tree); extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *, tree, tree, tree); extern bool any_dependent_type_attributes_p (tree); extern tree cp_reconstruct_complex_type (tree, tree); extern bool attributes_naming_typedef_ok (tree); extern void cplus_decl_attributes (tree *, tree, int); extern void finish_anon_union (tree); extern void cxx_post_compilation_parsing_cleanups (void); extern tree coerce_new_type (tree); extern tree coerce_delete_type (tree); 
extern void comdat_linkage (tree); extern void determine_visibility (tree); extern void constrain_class_visibility (tree); extern void reset_type_linkage (tree); extern void tentative_decl_linkage (tree); extern void import_export_decl (tree); extern tree build_cleanup (tree); extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **, tsubst_flags_t); extern bool decl_defined_p (tree); extern bool decl_constant_var_p (tree); extern bool decl_maybe_constant_var_p (tree); extern void no_linkage_error (tree); extern void check_default_args (tree); extern bool mark_used (tree); extern bool mark_used (tree, tsubst_flags_t); extern void finish_static_data_member_decl (tree, tree, bool, tree, int); extern tree cp_build_parm_decl (tree, tree, tree); extern tree get_guard (tree); extern tree get_guard_cond (tree, bool); extern tree set_guard (tree); extern tree get_tls_wrapper_fn (tree); extern void mark_needed (tree); extern bool decl_needed_p (tree); extern void note_vague_linkage_fn (tree); extern void note_variable_template_instantiation (tree); extern tree build_artificial_parm (tree, tree, tree); extern bool possibly_inlined_p (tree); extern int parm_index (tree); extern tree vtv_start_verification_constructor_init_function (void); extern tree vtv_finish_verification_constructor_init_function (tree); extern bool cp_omp_mappable_type (tree); /* in error.c */ extern const char *type_as_string (tree, int); extern const char *type_as_string_translate (tree, int); extern const char *decl_as_string (tree, int); extern const char *decl_as_string_translate (tree, int); extern const char *decl_as_dwarf_string (tree, int); extern const char *expr_as_string (tree, int); extern const char *lang_decl_name (tree, int, bool); extern const char *lang_decl_dwarf_name (tree, int, bool); extern const char *language_to_string (enum languages); extern const char *class_key_or_enum_as_string (tree); extern void maybe_warn_variadic_templates (void); extern void maybe_warn_cpp0x 
(cpp0x_warn_str str); extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern location_t location_of (tree); extern void qualified_name_lookup_error (tree, tree, tree, location_t); /* in except.c */ extern void init_exception_processing (void); extern tree expand_start_catch_block (tree); extern void expand_end_catch_block (void); extern tree build_exc_ptr (void); extern tree build_throw (tree); extern int nothrow_libfn_p (const_tree); extern void check_handlers (tree); extern tree finish_noexcept_expr (tree, tsubst_flags_t); extern bool expr_noexcept_p (tree, tsubst_flags_t); extern void perform_deferred_noexcept_checks (void); extern bool nothrow_spec_p (const_tree); extern bool type_noexcept_p (const_tree); extern bool type_throw_all_p (const_tree); extern tree build_noexcept_spec (tree, int); extern void choose_personality_routine (enum languages); extern tree build_must_not_throw_expr (tree,tree); extern tree eh_type_info (tree); extern tree begin_eh_spec_block (void); extern void finish_eh_spec_block (tree, tree); extern tree build_eh_type_type (tree); extern tree cp_protect_cleanup_actions (void); extern tree create_try_catch_expr (tree, tree); /* in expr.c */ extern tree cplus_expand_constant (tree); extern tree mark_use (tree expr, bool rvalue_p, bool read_p, location_t = UNKNOWN_LOCATION, bool reject_builtin = true); extern tree mark_rvalue_use (tree, location_t = UNKNOWN_LOCATION, bool reject_builtin = true); extern tree mark_lvalue_use (tree); extern tree mark_lvalue_use_nonread (tree); extern tree mark_type_use (tree); extern tree mark_discarded_use (tree); extern void mark_exp_read (tree); /* friend.c */ extern int is_friend (tree, tree); extern void make_friend_class (tree, tree, bool); extern void add_friend (tree, tree, bool); extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool); extern void set_global_friend (tree); extern bool is_global_friend (tree); /* in init.c */ extern tree 
expand_member_init (tree); extern void emit_mem_initializers (tree); extern tree build_aggr_init (tree, tree, int, tsubst_flags_t); extern int is_class_type (tree, int); extern tree get_type_value (tree); extern tree build_zero_init (tree, tree, bool); extern tree build_value_init (tree, tsubst_flags_t); extern tree build_value_init_noctor (tree, tsubst_flags_t); extern tree get_nsdmi (tree, bool, tsubst_flags_t); extern tree build_offset_ref (tree, tree, bool, tsubst_flags_t); extern tree throw_bad_array_new_length (void); extern bool type_has_new_extended_alignment (tree); extern unsigned malloc_alignment (void); extern tree build_new (vec<tree, va_gc> **, tree, tree, vec<tree, va_gc> **, int, tsubst_flags_t); extern tree get_temp_regvar (tree, tree); extern tree build_vec_init (tree, tree, tree, bool, int, tsubst_flags_t); extern tree build_delete (tree, tree, special_function_kind, int, int, tsubst_flags_t); extern void push_base_cleanups (void); extern tree build_vec_delete (tree, tree, special_function_kind, int, tsubst_flags_t); extern tree create_temporary_var (tree); extern void initialize_vtbl_ptrs (tree); extern tree scalar_constant_value (tree); extern tree decl_really_constant_value (tree); extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool); extern tree build_vtbl_address (tree); extern bool maybe_reject_flexarray_init (tree, tree); /* in lex.c */ extern void cxx_dup_lang_specific_decl (tree); extern void yyungetc (int, int); extern tree unqualified_name_lookup_error (tree, location_t = UNKNOWN_LOCATION); extern tree unqualified_fn_lookup_error (cp_expr); extern tree make_conv_op_name (tree); extern tree build_lang_decl (enum tree_code, tree, tree); extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree); extern void retrofit_lang_decl (tree); extern void fit_decomposition_lang_decl (tree, tree); extern tree copy_decl (tree CXX_MEM_STAT_INFO); extern tree copy_type (tree CXX_MEM_STAT_INFO); extern tree cxx_make_type 
(enum tree_code); extern tree make_class_type (enum tree_code); extern const char *get_identifier_kind_name (tree); extern void set_identifier_kind (tree, cp_identifier_kind); extern bool cxx_init (void); extern void cxx_finish (void); extern bool in_main_input_context (void); /* in method.c */ extern void init_method (void); extern tree make_thunk (tree, bool, tree, tree); extern void finish_thunk (tree); extern void use_thunk (tree, bool); extern bool trivial_fn_p (tree); extern tree forward_parm (tree); extern bool is_trivially_xible (enum tree_code, tree, tree); extern bool is_xible (enum tree_code, tree, tree); extern tree get_defaulted_eh_spec (tree, tsubst_flags_t = tf_warning_or_error); extern void after_nsdmi_defaulted_late_checks (tree); extern bool maybe_explain_implicit_delete (tree); extern void explain_implicit_non_constexpr (tree); extern void deduce_inheriting_ctor (tree); extern void synthesize_method (tree); extern tree lazily_declare_fn (special_function_kind, tree); extern tree skip_artificial_parms_for (const_tree, tree); extern int num_artificial_parms_for (const_tree); extern tree make_alias_for (tree, tree); extern tree get_copy_ctor (tree, tsubst_flags_t); extern tree get_copy_assign (tree); extern tree get_default_ctor (tree); extern tree get_dtor (tree, tsubst_flags_t); extern tree strip_inheriting_ctors (tree); extern tree inherited_ctor_binfo (tree); extern bool ctor_omit_inherited_parms (tree); extern tree locate_ctor (tree); extern tree implicitly_declare_fn (special_function_kind, tree, bool, tree, tree); /* In optimize.c */ extern bool maybe_clone_body (tree); /* In parser.c */ extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool, unsigned short); extern bool parsing_nsdmi (void); extern bool parsing_default_capturing_generic_lambda_in_template (void); extern void inject_this_parameter (tree, cp_cv_quals); extern location_t defarg_location (tree); extern void maybe_show_extern_c_location (void); /* in pt.c */ 
extern bool check_template_shadow (tree); extern bool check_auto_in_tmpl_args (tree, tree); extern tree get_innermost_template_args (tree, int); extern void maybe_begin_member_template_processing (tree); extern void maybe_end_member_template_processing (void); extern tree finish_member_template_decl (tree); extern void begin_template_parm_list (void); extern bool begin_specialization (void); extern void reset_specialization (void); extern void end_specialization (void); extern void begin_explicit_instantiation (void); extern void end_explicit_instantiation (void); extern void check_unqualified_spec_or_inst (tree, location_t); extern tree check_explicit_specialization (tree, tree, int, int, tree = NULL_TREE); extern int num_template_headers_for_class (tree); extern void check_template_variable (tree); extern tree make_auto (void); extern tree make_decltype_auto (void); extern tree make_template_placeholder (tree); extern bool template_placeholder_p (tree); extern tree do_auto_deduction (tree, tree, tree, tsubst_flags_t = tf_warning_or_error, auto_deduction_context = adc_unspecified, tree = NULL_TREE, int = LOOKUP_NORMAL); extern tree type_uses_auto (tree); extern tree type_uses_auto_or_concept (tree); extern void append_type_to_template_for_access_check (tree, tree, tree, location_t); extern tree convert_generic_types_to_packs (tree, int, int); extern tree splice_late_return_type (tree, tree); extern bool is_auto (const_tree); extern tree process_template_parm (tree, location_t, tree, bool, bool); extern tree end_template_parm_list (tree); extern void end_template_parm_list (void); extern void end_template_decl (void); extern tree maybe_update_decl_type (tree, tree); extern bool check_default_tmpl_args (tree, tree, bool, bool, int); extern tree push_template_decl (tree); extern tree push_template_decl_real (tree, bool); extern tree add_inherited_template_parms (tree, tree); extern bool redeclare_class_template (tree, tree, tree); extern tree lookup_template_class 
(tree, tree, tree, tree, int, tsubst_flags_t); extern tree lookup_template_function (tree, tree); extern tree lookup_template_variable (tree, tree); extern int uses_template_parms (tree); extern bool uses_template_parms_level (tree, int); extern bool in_template_function (void); extern bool need_generic_capture (void); extern tree instantiate_class_template (tree); extern tree instantiate_template (tree, tree, tsubst_flags_t); extern tree fn_type_unification (tree, tree, tree, const tree *, unsigned int, tree, unification_kind_t, int, bool, bool); extern void mark_decl_instantiated (tree, int); extern int more_specialized_fn (tree, tree, int); extern void do_decl_instantiation (tree, tree); extern void do_type_instantiation (tree, tree, tsubst_flags_t); extern bool always_instantiate_p (tree); extern bool maybe_instantiate_noexcept (tree, tsubst_flags_t = tf_warning_or_error); extern tree instantiate_decl (tree, bool, bool); extern int comp_template_parms (const_tree, const_tree); extern bool builtin_pack_fn_p (tree); extern bool uses_parameter_packs (tree); extern bool template_parameter_pack_p (const_tree); extern bool function_parameter_pack_p (const_tree); extern bool function_parameter_expanded_from_pack_p (tree, tree); extern tree make_pack_expansion (tree, tsubst_flags_t = tf_warning_or_error); extern bool check_for_bare_parameter_packs (tree); extern tree build_template_info (tree, tree); extern tree get_template_info (const_tree); extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree); extern int template_class_depth (tree); extern int is_specialization_of (tree, tree); extern bool is_specialization_of_friend (tree, tree); extern tree get_pattern_parm (tree, tree); extern int comp_template_args (tree, tree, tree * = NULL, tree * = NULL, bool = false); extern int template_args_equal (tree, tree, bool = false); extern tree maybe_process_partial_specialization (tree); extern tree most_specialized_instantiation (tree); extern void 
print_candidates (tree); extern void instantiate_pending_templates (int); extern tree tsubst_default_argument (tree, int, tree, tree, tsubst_flags_t); extern tree tsubst (tree, tree, tsubst_flags_t, tree); extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree, bool, bool); extern tree tsubst_expr (tree, tree, tsubst_flags_t, tree, bool); extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree); extern tree most_general_template (tree); extern tree get_mostly_instantiated_function_type (tree); extern bool problematic_instantiation_changed (void); extern void record_last_problematic_instantiation (void); extern struct tinst_level *current_instantiation(void); extern bool instantiating_current_function_p (void); extern tree maybe_get_template_decl_from_type_decl (tree); extern int processing_template_parmlist; extern bool dependent_type_p (tree); extern bool dependent_scope_p (tree); extern bool any_dependent_template_arguments_p (const_tree); extern bool any_erroneous_template_args_p (const_tree); extern bool dependent_template_p (tree); extern bool dependent_template_id_p (tree, tree); extern bool type_dependent_expression_p (tree); extern bool type_dependent_object_expression_p (tree); extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *); extern bool any_type_dependent_elements_p (const_tree); extern bool type_dependent_expression_p_push (tree); extern bool value_dependent_expression_p (tree); extern bool instantiation_dependent_expression_p (tree); extern bool instantiation_dependent_uneval_expression_p (tree); extern bool any_value_dependent_elements_p (const_tree); extern bool dependent_omp_for_p (tree, tree, tree, tree); extern tree resolve_typename_type (tree, bool); extern tree template_for_substitution (tree); extern tree build_non_dependent_expr (tree); extern void make_args_non_dependent (vec<tree, va_gc> *); extern bool reregister_specialization (tree, tree, tree); extern tree instantiate_non_dependent_expr 
(tree); extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t); extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t); extern tree instantiate_non_dependent_or_null (tree); extern bool variable_template_specialization_p (tree); extern bool alias_type_or_template_p (tree); extern bool alias_template_specialization_p (const_tree); extern bool dependent_alias_template_spec_p (const_tree); extern bool explicit_class_specialization_p (tree); extern bool push_tinst_level (tree); extern bool push_tinst_level_loc (tree, location_t); extern void pop_tinst_level (void); extern struct tinst_level *outermost_tinst_level(void); extern void init_template_processing (void); extern void print_template_statistics (void); bool template_template_parameter_p (const_tree); bool template_type_parameter_p (const_tree); extern bool primary_template_specialization_p (const_tree); extern tree get_primary_template_innermost_parameters (const_tree); extern tree get_template_parms_at_level (tree, int); extern tree get_template_innermost_arguments (const_tree); extern tree get_template_argument_pack_elems (const_tree); extern tree get_function_template_decl (const_tree); extern tree resolve_nondeduced_context (tree, tsubst_flags_t); extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val); extern tree coerce_template_parms (tree, tree, tree); extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t); extern void register_local_specialization (tree, tree); extern tree retrieve_local_specialization (tree); extern tree extract_fnparm_pack (tree, tree *); extern tree template_parm_to_arg (tree); extern tree dguide_name (tree); extern bool dguide_name_p (tree); extern bool deduction_guide_p (const_tree); extern bool copy_guide_p (const_tree); extern bool template_guide_p (const_tree); /* in repo.c */ extern void init_repo (void); extern int repo_emit_p (tree); extern bool repo_export_class_p (const_tree); extern void finish_repo (void); 
/* in rtti.c */ /* A vector of all tinfo decls that haven't been emitted yet. */ extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls; extern void init_rtti_processing (void); extern tree build_typeid (tree, tsubst_flags_t); extern tree get_tinfo_decl (tree); extern tree get_typeid (tree, tsubst_flags_t); extern tree build_headof (tree); extern tree build_dynamic_cast (tree, tree, tsubst_flags_t); extern void emit_support_tinfos (void); extern bool emit_tinfo_decl (tree); /* in search.c */ extern bool accessible_base_p (tree, tree, bool); extern tree lookup_base (tree, tree, base_access, base_kind *, tsubst_flags_t); extern tree dcast_base_hint (tree, tree); extern int accessible_p (tree, tree, bool); extern int accessible_in_template_p (tree, tree); extern tree lookup_field (tree, tree, int, bool); extern tree lookup_fnfields (tree, tree, int); extern tree lookup_member (tree, tree, int, bool, tsubst_flags_t, access_failure_info *afi = NULL); extern tree lookup_member_fuzzy (tree, tree, bool); extern tree locate_field_accessor (tree, tree, bool); extern int look_for_overrides (tree, tree); extern void get_pure_virtuals (tree); extern void maybe_suppress_debug_info (tree); extern void note_debug_info_needed (tree); extern tree current_scope (void); extern int at_function_scope_p (void); extern bool at_class_scope_p (void); extern bool at_namespace_scope_p (void); extern tree context_for_name_lookup (tree); extern tree lookup_conversions (tree); extern tree binfo_from_vbase (tree); extern tree binfo_for_vbase (tree, tree); extern tree look_for_overrides_here (tree, tree); #define dfs_skip_bases ((tree)1) extern tree dfs_walk_all (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree dfs_walk_once (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree binfo_via_virtual (tree, tree); extern bool binfo_direct_p (tree); extern tree build_baselink (tree, tree, tree, tree); extern tree adjust_result_of_qualified_name_lookup 
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ());

/* The representation of a deferred access check.  */

struct GTY(()) deferred_access_check
{
  /* The base class in which the declaration is referenced.  */
  tree binfo;
  /* The declaration whose access must be checked.  */
  tree decl;
  /* The declaration that should be used in the error message.  */
  tree diag_decl;
  /* The location of this access.  */
  location_t loc;
};

/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *, tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree, tsubst_flags_t, access_failure_info *afi = NULL);

/* RAII sentinel to ensure that deferred access checks are popped before
   a function returns.  */

struct deferring_access_check_sentinel
{
  /* Constructor pushes a deferred-checks level ...  */
  deferring_access_check_sentinel ()
  {
    push_deferring_access_checks (dk_deferred);
  }
  /* ... and the destructor pops it on every exit path.  */
  ~deferring_access_check_sentinel ()
  {
    pop_deferring_access_checks ();
  }
};

extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern tree finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool, unsigned short);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool, unsigned short);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool, unsigned short);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree
begin_handler (void); extern void finish_handler_parms (tree, tree); extern void finish_handler (tree); extern void finish_cleanup (tree, tree); extern bool is_this_parameter (tree); enum { BCS_NORMAL = 0, BCS_NO_SCOPE = 1, BCS_TRY_BLOCK = 2, BCS_FN_BODY = 4, BCS_TRANSACTION = 8 }; extern tree begin_compound_stmt (unsigned int); extern void finish_compound_stmt (tree); extern tree finish_asm_stmt (int, tree, tree, tree, tree, tree); extern tree finish_label_stmt (tree); extern void finish_label_decl (tree); extern cp_expr finish_parenthesized_expr (cp_expr); extern tree force_paren_expr (tree); extern tree maybe_undo_parenthesized_ref (tree); extern tree finish_non_static_data_member (tree, tree, tree); extern tree begin_stmt_expr (void); extern tree finish_stmt_expr_expr (tree, tree); extern tree finish_stmt_expr (tree, bool); extern tree stmt_expr_value_expr (tree); bool empty_expr_stmt_p (tree); extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *, tsubst_flags_t); extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool, bool, tsubst_flags_t); extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error); extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error); extern cp_expr finish_increment_expr (cp_expr, enum tree_code); extern tree finish_this_expr (void); extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t); extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr, tsubst_flags_t); /* Whether this call to finish_compound_literal represents a C++11 functional cast or a C99 compound literal. 
*/ enum fcl_t { fcl_functional, fcl_c99 }; extern tree finish_compound_literal (tree, tree, tsubst_flags_t, fcl_t = fcl_functional); extern tree finish_fname (tree); extern void finish_translation_unit (void); extern tree finish_template_type_parm (tree, tree); extern tree finish_template_template_parm (tree, tree); extern tree begin_class_definition (tree); extern void finish_template_decl (tree); extern tree finish_template_type (tree, tree, int); extern tree finish_base_specifier (tree, tree, bool); extern void finish_member_declaration (tree); extern bool outer_automatic_var_p (tree); extern tree process_outer_var_ref (tree, tsubst_flags_t, bool force_use = false); extern cp_expr finish_id_expression (tree, tree, tree, cp_id_kind *, bool, bool, bool *, bool, bool, bool, bool, const char **, location_t); extern tree finish_typeof (tree); extern tree finish_underlying_type (tree); extern tree calculate_bases (tree, tsubst_flags_t); extern tree finish_bases (tree, bool); extern tree calculate_direct_bases (tree, tsubst_flags_t); extern tree finish_offsetof (tree, tree, location_t); extern void finish_decl_cleanup (tree, tree); extern void finish_eh_cleanup (tree); extern void emit_associated_thunks (tree); extern void finish_mem_initializers (tree); extern tree check_template_template_default_arg (tree); extern bool expand_or_defer_fn_1 (tree); extern void expand_or_defer_fn (tree); extern void add_typedef_to_current_template_for_access_check (tree, tree, location_t); extern void check_accessibility_of_qualified_id (tree, tree, tree); extern tree finish_qualified_id_expr (tree, tree, bool, bool, bool, bool, tsubst_flags_t); extern void simplify_aggr_init_expr (tree *); extern void finalize_nrv (tree *, tree, tree); extern tree omp_reduction_id (enum tree_code, tree, tree); extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *); extern void cp_check_omp_declare_reduction (tree); extern void finish_omp_declare_simd_methods (tree); extern tree 
finish_omp_clauses (tree, enum c_omp_region_type); extern tree push_omp_privatization_clauses (bool); extern void pop_omp_privatization_clauses (tree); extern void save_omp_privatization_clauses (vec<tree> &); extern void restore_omp_privatization_clauses (vec<tree> &); extern void finish_omp_threadprivate (tree); extern tree begin_omp_structured_block (void); extern tree finish_omp_structured_block (tree); extern tree finish_oacc_data (tree, tree); extern tree finish_oacc_host_data (tree, tree); extern tree finish_omp_construct (enum tree_code, tree, tree); extern tree begin_omp_parallel (void); extern tree finish_omp_parallel (tree, tree); extern tree begin_omp_task (void); extern tree finish_omp_task (tree, tree); extern tree finish_omp_for (location_t, enum tree_code, tree, tree, tree, tree, tree, tree, tree, vec<tree> *, tree); extern void finish_omp_atomic (enum tree_code, enum tree_code, tree, tree, tree, tree, tree, bool); extern void finish_omp_barrier (void); extern void finish_omp_flush (void); extern void finish_omp_taskwait (void); extern void finish_omp_taskyield (void); extern void finish_omp_cancel (tree); extern void finish_omp_cancellation_point (tree); extern tree omp_privatize_field (tree, bool); extern tree begin_transaction_stmt (location_t, tree *, int); extern void finish_transaction_stmt (tree, tree, int, tree); extern tree build_transaction_expr (location_t, tree, int, tree); extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool, bool); extern tree baselink_for_fns (tree); extern void finish_static_assert (tree, tree, location_t, bool); extern tree finish_decltype_type (tree, bool, tsubst_flags_t); extern tree finish_trait_expr (enum cp_trait_kind, tree, tree); extern tree build_lambda_expr (void); extern tree build_lambda_object (tree); extern tree begin_lambda_type (tree); extern tree lambda_capture_field_type (tree, bool, bool); extern tree lambda_return_type (tree); extern tree lambda_proxy_type (tree); extern tree 
lambda_function (tree); extern void apply_deduced_return_type (tree, tree); extern tree add_capture (tree, tree, tree, bool, bool); extern tree add_default_capture (tree, tree, tree); extern void insert_capture_proxy (tree); extern void insert_pending_capture_proxies (void); extern bool is_capture_proxy (tree); extern bool is_normal_capture_proxy (tree); extern bool is_constant_capture_proxy (tree); extern void register_capture_members (tree); extern tree lambda_expr_this_capture (tree, bool); extern void maybe_generic_this_capture (tree, tree); extern tree maybe_resolve_dummy (tree, bool); extern tree current_nonlambda_function (void); extern tree nonlambda_method_basetype (void); extern tree current_nonlambda_scope (void); extern tree current_lambda_expr (void); extern bool generic_lambda_fn_p (tree); extern tree do_dependent_capture (tree, bool = false); extern bool lambda_fn_in_template_p (tree); extern void maybe_add_lambda_conv_op (tree); extern bool is_lambda_ignored_entity (tree); extern bool lambda_static_thunk_p (tree); extern tree finish_builtin_launder (location_t, tree, tsubst_flags_t); extern void start_lambda_scope (tree); extern void record_lambda_scope (tree); extern void finish_lambda_scope (void); extern tree start_lambda_function (tree fn, tree lambda_expr); extern void finish_lambda_function (tree body); /* in tree.c */ extern int cp_tree_operand_length (const_tree); extern int cp_tree_code_length (enum tree_code); extern void cp_free_lang_data (tree t); extern tree force_target_expr (tree, tree, tsubst_flags_t); extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t); extern void lang_check_failed (const char *, int, const char *) ATTRIBUTE_NORETURN ATTRIBUTE_COLD; extern tree stabilize_expr (tree, tree *); extern void stabilize_call (tree, tree *); extern bool stabilize_init (tree, tree *); extern tree add_stmt_to_compound (tree, tree); extern void init_tree (void); extern bool pod_type_p (const_tree); extern bool 
layout_pod_type_p (const_tree); extern bool std_layout_type_p (const_tree); extern bool trivial_type_p (const_tree); extern bool trivially_copyable_p (const_tree); extern bool type_has_unique_obj_representations (const_tree); extern bool scalarish_type_p (const_tree); extern bool type_has_nontrivial_default_init (const_tree); extern bool type_has_nontrivial_copy_init (const_tree); extern void maybe_warn_parm_abi (tree, location_t); extern bool class_tmpl_impl_spec_p (const_tree); extern int zero_init_p (const_tree); extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree); extern bool check_abi_tag_args (tree, tree); extern tree strip_typedefs (tree, bool * = NULL); extern tree strip_typedefs_expr (tree, bool * = NULL); extern tree copy_binfo (tree, tree, tree, tree *, int); extern int member_p (const_tree); extern cp_lvalue_kind real_lvalue_p (const_tree); extern cp_lvalue_kind lvalue_kind (const_tree); extern bool glvalue_p (const_tree); extern bool obvalue_p (const_tree); extern bool xvalue_p (const_tree); extern bool bitfield_p (const_tree); extern tree cp_stabilize_reference (tree); extern bool builtin_valid_in_constant_expr_p (const_tree); extern tree build_min (enum tree_code, tree, ...); extern tree build_min_nt_loc (location_t, enum tree_code, ...); extern tree build_min_non_dep (enum tree_code, tree, ...); extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...); extern tree build_min_nt_call_vec (tree, vec<tree, va_gc> *); extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *); extern vec<tree, va_gc>* vec_copy_and_insert (vec<tree, va_gc>*, tree, unsigned); extern tree build_cplus_new (tree, tree, tsubst_flags_t); extern tree build_aggr_init_expr (tree, tree); extern tree get_target_expr (tree); extern tree get_target_expr_sfinae (tree, tsubst_flags_t); extern tree build_cplus_array_type (tree, tree); extern tree build_array_of_n_type (tree, int); extern bool array_of_runtime_bound_p (tree); extern 
tree build_array_copy (tree); extern tree build_vec_init_expr (tree, tree, tsubst_flags_t); extern void diagnose_non_constexpr_vec_init (tree); extern tree hash_tree_cons (tree, tree, tree); extern tree hash_tree_chain (tree, tree); extern tree build_qualified_name (tree, tree, tree, bool); extern tree build_ref_qualified_type (tree, cp_ref_qualifier); inline tree ovl_first (tree) ATTRIBUTE_PURE; extern tree ovl_make (tree fn, tree next = NULL_TREE); extern tree ovl_insert (tree fn, tree maybe_ovl, bool using_p = false); extern tree ovl_skip_hidden (tree) ATTRIBUTE_PURE; extern void lookup_mark (tree lookup, bool val); extern tree lookup_add (tree fns, tree lookup); extern tree lookup_maybe_add (tree fns, tree lookup, bool deduping); extern void lookup_keep (tree lookup, bool keep); extern void lookup_list_keep (tree list, bool keep); extern int is_overloaded_fn (tree) ATTRIBUTE_PURE; extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE; extern tree dependent_name (tree); extern tree get_fns (tree) ATTRIBUTE_PURE; extern tree get_first_fn (tree) ATTRIBUTE_PURE; extern tree ovl_scope (tree); extern const char *cxx_printable_name (tree, int); extern const char *cxx_printable_name_translate (tree, int); extern tree canonical_eh_spec (tree); extern tree build_exception_variant (tree, tree); extern tree bind_template_template_parm (tree, tree); extern tree array_type_nelts_total (tree); extern tree array_type_nelts_top (tree); extern tree break_out_target_exprs (tree, bool = false); extern tree build_ctor_subob_ref (tree, tree, tree); extern tree replace_placeholders (tree, tree, bool * = NULL); extern bool find_placeholders (tree); extern tree get_type_decl (tree); extern tree decl_namespace_context (tree); extern bool decl_anon_ns_mem_p (const_tree); extern tree lvalue_type (tree); extern tree error_type (tree); extern int varargs_function_p (const_tree); extern bool cp_tree_equal (tree, tree); extern tree no_linkage_check (tree, bool); extern void debug_binfo 
(tree); extern tree build_dummy_object (tree); extern tree maybe_dummy_object (tree, tree *); extern int is_dummy_object (const_tree); extern const struct attribute_spec cxx_attribute_table[]; extern tree make_ptrmem_cst (tree, tree); extern tree cp_build_type_attribute_variant (tree, tree); extern tree cp_build_reference_type (tree, bool); extern tree move (tree); extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t); #define cp_build_qualified_type(TYPE, QUALS) \ cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error) extern bool cv_qualified_p (const_tree); extern tree cv_unqualified (tree); extern special_function_kind special_function_p (const_tree); extern int count_trees (tree); extern int char_type_p (tree); extern void verify_stmt_tree (tree); extern linkage_kind decl_linkage (tree); extern duration_kind decl_storage_duration (tree); extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn, void*, hash_set<tree> *); #define cp_walk_tree(tp,func,data,pset) \ walk_tree_1 (tp, func, data, pset, cp_walk_subtrees) #define cp_walk_tree_without_duplicates(tp,func,data) \ walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees) extern tree rvalue (tree); extern tree convert_bitfield_to_declared_type (tree); extern tree cp_save_expr (tree); extern bool cast_valid_in_integral_constant_expression_p (tree); extern bool cxx_type_hash_eq (const_tree, const_tree); extern tree cxx_copy_lang_qualifiers (const_tree, const_tree); extern void cxx_print_statistics (void); extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t); extern void cp_warn_deprecated_use (tree); /* in ptree.c */ extern void cxx_print_xnode (FILE *, tree, int); extern void cxx_print_decl (FILE *, tree, int); extern void cxx_print_type (FILE *, tree, int); extern void cxx_print_identifier (FILE *, tree, int); extern void cxx_print_error_function (diagnostic_context *, const char *, struct diagnostic_info *); /* in typeck.c */ extern bool 
cxx_mark_addressable (tree, bool = false); extern int string_conv_p (const_tree, const_tree, int); extern tree cp_truthvalue_conversion (tree); extern tree condition_conversion (tree); extern tree require_complete_type (tree); extern tree require_complete_type_sfinae (tree, tsubst_flags_t); extern tree complete_type (tree); extern tree complete_type_or_else (tree, tree); extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t); inline bool type_unknown_p (const_tree); enum { ce_derived, ce_type, ce_normal, ce_exact }; extern bool comp_except_specs (const_tree, const_tree, int); extern bool comptypes (tree, tree, int); extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree); extern bool compparms (const_tree, const_tree); extern int comp_cv_qualification (const_tree, const_tree); extern int comp_cv_qualification (int, int); extern int comp_cv_qual_signature (tree, tree); extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool); extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool, bool); extern tree cxx_alignas_expr (tree); extern tree cxx_sizeof_nowarn (tree); extern tree is_bitfield_expr_with_lowered_type (const_tree); extern tree unlowered_expr_type (const_tree); extern tree decay_conversion (tree, tsubst_flags_t, bool = true); extern tree build_class_member_access_expr (cp_expr, tree, tree, bool, tsubst_flags_t); extern tree finish_class_member_access_expr (cp_expr, tree, bool, tsubst_flags_t); extern tree build_x_indirect_ref (location_t, tree, ref_operator, tsubst_flags_t); extern tree cp_build_indirect_ref (tree, ref_operator, tsubst_flags_t); extern tree cp_build_fold_indirect_ref (tree); extern tree build_array_ref (location_t, tree, tree); extern tree cp_build_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t); extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...) 
ATTRIBUTE_SENTINEL; extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **, tsubst_flags_t); extern tree build_x_binary_op (location_t, enum tree_code, tree, enum tree_code, tree, enum tree_code, tree *, tsubst_flags_t); extern tree build_x_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree build_x_unary_op (location_t, enum tree_code, cp_expr, tsubst_flags_t); extern tree cp_build_addressof (location_t, tree, tsubst_flags_t); extern tree cp_build_addr_expr (tree, tsubst_flags_t); extern tree cp_build_unary_op (enum tree_code, tree, bool, tsubst_flags_t); extern tree genericize_compound_lvalue (tree); extern tree unary_complex_lvalue (enum tree_code, tree); extern tree build_x_conditional_expr (location_t, tree, tree, tree, tsubst_flags_t); extern tree build_x_compound_expr_from_list (tree, expr_list_kind, tsubst_flags_t); extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *, const char *, tsubst_flags_t); extern tree build_x_compound_expr (location_t, tree, tree, tsubst_flags_t); extern tree build_compound_expr (location_t, tree, tree); extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t); extern tree build_static_cast (tree, tree, tsubst_flags_t); extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t); extern tree build_const_cast (tree, tree, tsubst_flags_t); extern tree build_c_cast (location_t, tree, tree); extern cp_expr build_c_cast (location_t loc, tree type, cp_expr expr); extern tree cp_build_c_cast (tree, tree, tsubst_flags_t); extern cp_expr build_x_modify_expr (location_t, tree, enum tree_code, tree, tsubst_flags_t); extern tree cp_build_modify_expr (location_t, tree, enum tree_code, tree, tsubst_flags_t); extern tree convert_for_initialization (tree, tree, tree, int, impl_conv_rhs, tree, int, tsubst_flags_t); extern int comp_ptr_ttypes (tree, tree); extern bool comp_ptr_ttypes_const (tree, tree); extern bool error_type_p (const_tree); extern bool ptr_reasonably_similar (const_tree, const_tree); 
extern tree build_ptrmemfunc (tree, tree, int, bool, tsubst_flags_t); extern int cp_type_quals (const_tree); extern int type_memfn_quals (const_tree); extern cp_ref_qualifier type_memfn_rqual (const_tree); extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier); extern bool cp_has_mutable_p (const_tree); extern bool at_least_as_qualified_p (const_tree, const_tree); extern void cp_apply_type_quals_to_decl (int, tree); extern tree build_ptrmemfunc1 (tree, tree, tree); extern void expand_ptrmemfunc_cst (tree, tree *, tree *); extern tree type_after_usual_arithmetic_conversions (tree, tree); extern tree common_pointer_type (tree, tree); extern tree composite_pointer_type (tree, tree, tree, tree, composite_pointer_operation, tsubst_flags_t); extern tree merge_types (tree, tree); extern tree strip_array_domain (tree); extern tree check_return_expr (tree, bool *); extern tree cp_build_binary_op (location_t, enum tree_code, tree, tree, tsubst_flags_t); extern tree build_x_vec_perm_expr (location_t, tree, tree, tree, tsubst_flags_t); #define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, false, true) extern tree build_simple_component_ref (tree, tree); extern tree build_ptrmemfunc_access_expr (tree, tree); extern tree build_address (tree); extern tree build_nop (tree, tree); extern tree non_reference (tree); extern tree lookup_anon_field (tree, tree); extern bool invalid_nonstatic_memfn_p (location_t, tree, tsubst_flags_t); extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t); extern tree convert_ptrmem (tree, tree, bool, bool, tsubst_flags_t); extern int lvalue_or_else (tree, enum lvalue_use, tsubst_flags_t); extern void check_template_keyword (tree); extern bool check_raw_literal_operator (const_tree decl); extern bool check_literal_operator_args (const_tree, bool *, bool *); extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t); extern tree cp_perform_integral_promotions (tree, tsubst_flags_t); extern tree 
finish_left_unary_fold_expr (tree, int); extern tree finish_right_unary_fold_expr (tree, int); extern tree finish_binary_fold_expr (tree, tree, int); /* in typeck2.c */ extern void require_complete_eh_spec_types (tree, tree); extern void cxx_incomplete_type_diagnostic (location_t, const_tree, const_tree, diagnostic_t); inline void cxx_incomplete_type_diagnostic (const_tree value, const_tree type, diagnostic_t diag_kind) { cxx_incomplete_type_diagnostic (EXPR_LOC_OR_LOC (value, input_location), value, type, diag_kind); } extern void cxx_incomplete_type_error (location_t, const_tree, const_tree); inline void cxx_incomplete_type_error (const_tree value, const_tree type) { cxx_incomplete_type_diagnostic (value, type, DK_ERROR); } extern void cxx_incomplete_type_inform (const_tree); extern tree error_not_base_type (tree, tree); extern tree binfo_or_else (tree, tree); extern void cxx_readonly_error (tree, enum lvalue_use); extern void complete_type_check_abstract (tree); extern int abstract_virtuals_error (tree, tree); extern int abstract_virtuals_error (abstract_class_use, tree); extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t); extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t); extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int); extern tree split_nonconstant_init (tree, tree); extern bool check_narrowing (tree, tree, tsubst_flags_t); extern tree digest_init (tree, tree, tsubst_flags_t); extern tree digest_init_flags (tree, tree, int, tsubst_flags_t); extern tree digest_nsdmi_init (tree, tree, tsubst_flags_t); extern tree build_scoped_ref (tree, tree, tree *); extern tree build_x_arrow (location_t, tree, tsubst_flags_t); extern tree build_m_component_ref (tree, tree, tsubst_flags_t); extern tree build_functional_cast (tree, tree, tsubst_flags_t); extern tree add_exception_specifier (tree, tree, int); extern tree merge_exception_specifiers (tree, tree); /* in mangle.c */ extern void init_mangle 
(void); extern void mangle_decl (tree); extern const char *mangle_type_string (tree); extern tree mangle_typeinfo_for_type (tree); extern tree mangle_typeinfo_string_for_type (tree); extern tree mangle_vtbl_for_type (tree); extern tree mangle_vtt_for_type (tree); extern tree mangle_ctor_vtbl_for_type (tree, tree); extern tree mangle_thunk (tree, int, tree, tree, tree); extern tree mangle_guard_variable (tree); extern tree mangle_tls_init_fn (tree); extern tree mangle_tls_wrapper_fn (tree); extern bool decl_tls_wrapper_p (tree); extern tree mangle_ref_init_variable (tree); extern char * get_mangled_vtable_map_var_name (tree); extern bool mangle_return_type_p (tree); extern tree mangle_decomp (tree, vec<tree> &); /* in dump.c */ extern bool cp_dump_tree (void *, tree); /* In cp/cp-objcp-common.c. */ extern alias_set_type cxx_get_alias_set (tree); extern bool cxx_warn_unused_global_decl (const_tree); extern size_t cp_tree_size (enum tree_code); extern bool cp_var_mod_type_p (tree, tree); extern void cxx_initialize_diagnostics (diagnostic_context *); extern int cxx_types_compatible_p (tree, tree); extern void init_shadowed_var_for_decl (void); extern bool cxx_block_may_fallthru (const_tree); /* in cp-gimplify.c */ extern int cp_gimplify_expr (tree *, gimple_seq *, gimple_seq *); extern void cp_genericize (tree); extern bool cxx_omp_const_qual_no_mutable (tree); extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree); extern tree cxx_omp_clause_default_ctor (tree, tree, tree); extern tree cxx_omp_clause_copy_ctor (tree, tree, tree); extern tree cxx_omp_clause_assign_op (tree, tree, tree); extern tree cxx_omp_clause_dtor (tree, tree); extern void cxx_omp_finish_clause (tree, gimple_seq *); extern bool cxx_omp_privatize_by_reference (const_tree); extern bool cxx_omp_disregard_value_expr (tree, bool); extern void cp_fold_function (tree); extern tree cp_fully_fold (tree); extern void clear_fold_cache (void); /* in name-lookup.c */ extern void 
suggest_alternatives_for (location_t, tree, bool); extern bool suggest_alternative_in_explicit_scope (location_t, tree, tree); extern tree strip_using_decl (tree); /* Tell the binding oracle what kind of binding we are looking for. */ enum cp_oracle_request { CP_ORACLE_IDENTIFIER }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. */ typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier); extern cp_binding_oracle_function *cp_binding_oracle; /* in constraint.cc */ extern void init_constraint_processing (); extern bool constraint_p (tree); extern tree conjoin_constraints (tree, tree); extern tree conjoin_constraints (tree); extern tree get_constraints (tree); extern void set_constraints (tree, tree); extern void remove_constraints (tree); extern tree current_template_constraints (void); extern tree associate_classtype_constraints (tree); extern tree build_constraints (tree, tree); extern tree get_shorthand_constraints (tree); extern tree build_concept_check (tree, tree, tree = NULL_TREE); extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE); extern tree make_constrained_auto (tree, tree); extern void placeholder_extract_concept_and_args (tree, tree&, tree&); extern bool equivalent_placeholder_constraints (tree, tree); extern hashval_t hash_placeholder_constraint (tree); extern bool deduce_constrained_parameter (tree, tree&, tree&); extern tree resolve_constraint_check (tree); extern tree check_function_concept (tree); extern tree finish_template_introduction (tree, tree); extern bool valid_requirements_p (tree); extern tree finish_concept_name (tree); extern tree 
finish_shorthand_constraint (tree, tree); extern tree finish_requires_expr (tree, tree); extern tree finish_simple_requirement (tree); extern tree finish_type_requirement (tree); extern tree finish_compound_requirement (tree, tree, bool); extern tree finish_nested_requirement (tree); extern void check_constrained_friend (tree, tree); extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree); extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree); extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree); extern bool function_concept_check_p (tree); extern tree normalize_expression (tree); extern tree expand_concept (tree, tree); extern bool expanding_concept (); extern tree evaluate_constraints (tree, tree); extern tree evaluate_function_concept (tree, tree); extern tree evaluate_variable_concept (tree, tree); extern tree evaluate_constraint_expression (tree, tree); extern bool constraints_satisfied_p (tree); extern bool constraints_satisfied_p (tree, tree); extern tree lookup_constraint_satisfaction (tree, tree); extern tree memoize_constraint_satisfaction (tree, tree, tree); extern tree lookup_concept_satisfaction (tree, tree); extern tree memoize_concept_satisfaction (tree, tree, tree); extern tree get_concept_expansion (tree, tree); extern tree save_concept_expansion (tree, tree, tree); extern bool* lookup_subsumption_result (tree, tree); extern bool save_subsumption_result (tree, tree, bool); extern bool equivalent_constraints (tree, tree); extern bool equivalently_constrained (tree, tree); extern bool subsumes_constraints (tree, tree); extern bool strictly_subsumes (tree, tree); extern int more_constrained (tree, tree); extern void diagnose_constraints (location_t, tree, tree); /* in logic.cc */ extern tree decompose_conclusions (tree); extern bool subsumes (tree, tree); /* In class.c */ extern void cp_finish_injected_record_type (tree); /* in vtable-class-hierarchy.c */ extern void 
vtv_compute_class_hierarchy_transitive_closure (void); extern void vtv_generate_init_routine (void); extern void vtv_save_class_info (tree); extern void vtv_recover_class_info (void); extern void vtv_build_vtable_verify_fndecl (void); /* In constexpr.c */ extern void fini_constexpr (void); extern bool literal_type_p (tree); extern tree register_constexpr_fundef (tree, tree); extern bool is_valid_constexpr_fn (tree, bool); extern bool check_constexpr_ctor_body (tree, tree, bool); extern tree constexpr_fn_retval (tree); extern tree ensure_literal_type_for_constexpr_object (tree); extern bool potential_constant_expression (tree); extern bool is_constant_expression (tree); extern bool is_nondependent_constant_expression (tree); extern bool is_nondependent_static_init_expression (tree); extern bool is_static_init_expression (tree); extern bool potential_rvalue_constant_expression (tree); extern bool require_potential_constant_expression (tree); extern bool require_constant_expression (tree); extern bool require_rvalue_constant_expression (tree); extern bool require_potential_rvalue_constant_expression (tree); extern tree cxx_constant_value (tree, tree = NULL_TREE); extern tree cxx_constant_init (tree, tree = NULL_TREE); extern tree maybe_constant_value (tree, tree = NULL_TREE); extern tree maybe_constant_init (tree, tree = NULL_TREE); extern tree fold_non_dependent_expr (tree); extern tree fold_simple (tree); extern bool is_sub_constant_expr (tree); extern bool reduced_constant_expression_p (tree); extern bool is_instantiation_of_constexpr (tree); extern bool var_in_constexpr_fn (tree); extern bool var_in_maybe_constexpr_fn (tree); extern void explain_invalid_constexpr_fn (tree); extern vec<tree> cx_error_context (void); extern tree fold_sizeof_expr (tree); extern void clear_cv_and_fold_caches (void); /* In cp-ubsan.c */ extern void cp_ubsan_maybe_instrument_member_call (tree); extern void cp_ubsan_instrument_member_accesses (tree *); extern tree 
cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree);
extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree);
extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree);

/* Inline bodies. */

/* Skip past any chain of OVERLOAD wrappers in NODE (following
   OVL_FUNCTION links) and return the first non-OVERLOAD node.  */
inline tree
ovl_first (tree node)
{
  while (TREE_CODE (node) == OVERLOAD)
    node = OVL_FUNCTION (node);
  return node;
}

/* True iff EXPR's type is the special unknown_type_node, i.e. its
   type has not been determined.  */
inline bool
type_unknown_p (const_tree expr)
{
  return TREE_TYPE (expr) == unknown_type_node;
}

/* Hash DECL by the hash value of its name (via OVL_NAME); entities
   with no name hash to 0.  */
inline hashval_t
named_decl_hash::hash (const value_type decl)
{
  tree name = OVL_NAME (decl);
  return name ? IDENTIFIER_HASH_VALUE (name) : 0;
}

/* True iff CANDIDATE is the name of EXISTING.  The comparison is by
   pointer identity of the identifier nodes.  */
inline bool
named_decl_hash::equal (const value_type existing, compare_type candidate)
{
  tree name = OVL_NAME (existing);
  return candidate == name;
}

/* True iff EXPR, after stripping any location wrapper, is the
   null_node tree.  */
inline bool
null_node_p (const_tree expr)
{
  STRIP_ANY_LOCATION_WRAPPER (expr);
  return expr == null_node;
}

#if CHECKING_P
namespace selftest {

extern void run_cp_tests (void);

/* Declarations for specific families of tests within cp,
   by source file, in alphabetical order.  */
extern void cp_pt_c_tests ();
extern void cp_tree_c_tests (void);

} // namespace selftest
#endif /* #if CHECKING_P */

/* -- end of C++ */

#endif /* ! GCC_CP_TREE_H */
// ClusterCreator.h
// Class for running the chart-clustering algorithm on Charts.
// Stateless: all entry points are static. Works on a CGAL Polyhedron whose
// faces carry ids, a face-id -> chart-id lookup table (chart_id_map), and
// project types Chart / JoinOperation / CLUSTER_SETTINGS declared elsewhere.
struct ClusterCreator
{

  // Takes a chart id map that was created by grid/octree clustering and
  // builds a vector of Chart objects plus the initial join list, then
  // renumbers charts into chart_id_map.
  //
  // P                  - input mesh (faces must expose ->id()).
  // cost_threshold     - currently unused here: the clustering call that
  //                      consumed it (cluster_faces) is commented out below.
  // chart_threshold    - forwarded to initialise_charts_from_grid_clusters
  //                      (its early-exit there is also commented out).
  // cluster_settings   - join-cost parameters, forwarded to join creation.
  // chart_id_map       - in: grid/octree chart assignment per face id;
  //                      out: overwritten with compacted chart ids.
  // num_initial_charts - unused in the current implementation.
  //
  // Returns the number of active charts after the (currently disabled)
  // clustering step, i.e. the number of charts found in chart_id_map.
  static uint32_t create_chart_clusters_from_grid_clusters(Polyhedron &P,
                                const double cost_threshold,
                                const uint32_t chart_threshold,
                                CLUSTER_SETTINGS cluster_settings,
                                std::map<uint32_t, uint32_t> &chart_id_map,
                                uint32_t num_initial_charts){

    std::vector<Chart> charts;

    uint32_t num_charts = initialise_charts_from_grid_clusters(P, chart_id_map, charts, cluster_settings, chart_threshold);

    // check that existing chart number is not already lower than threshold
    /*
    if (num_charts <= chart_threshold)
    {
      std::cout << "Input to chart clusterer already had number of charts below chart threshold" << std::endl;
      return num_charts;
    }
    */

    // Recalculate perimeters of charts to ensure they are correct after the
    // quick (non-incremental) chart construction, and cache each chart's
    // neighbour set from the face->chart lookup table.
    for (auto& chart : charts) {
      chart.recalculate_perimeter_from_scratch();
      chart.create_neighbour_set(chart_id_map);
    }

    // Create the join list (one JoinOperation per adjacent chart pair).
    std::list<JoinOperation> joins;
    create_join_list_from_chart_vector(charts, joins, cluster_settings, chart_id_map);

    // Do the clustering!
    // NOTE(review): the actual merge step is disabled; with this line
    // commented out the function only rebuilds charts and joins.
    // cluster_faces(charts, joins,cost_threshold,chart_threshold,cluster_settings,chart_id_map);

    uint32_t active_charts = populate_chart_LUT(charts, chart_id_map);

    // Checking - how many charts have border edges?
    // uint32_t charts_with_borders = 0;
    // for(auto& chart : charts){if (chart.active && chart.has_border_edge) charts_with_borders++;}
    // std::cout << charts_with_borders << " of " << active_charts << " charts have borders\n";

    return active_charts;
  }

  // Given a chart_id_map (face id -> chart id), fill `charts` with Chart
  // objects describing the initial clustering state. Faces belonging to the
  // same chart are grouped and each group is compiled into one Chart;
  // group compilation is parallelised with OpenMP.
  //
  // Returns the number of charts created (== charts.size()).
  // NOTE(review): fareas_vec/fnormals_vec/faces_vec are indexed by face id
  // below — this assumes face ids are 0..n-1 in facet-iteration order;
  // confirm against how ids are assigned upstream.
  static uint32_t initialise_charts_from_grid_clusters(Polyhedron &P,
                std::map<uint32_t, uint32_t> &chart_id_map,
                std::vector<Chart> &charts,
                CLUSTER_SETTINGS cluster_settings,
                const uint32_t chart_threshold ){

    // Per-face geometric data, first keyed by descriptor, then flattened
    // into vectors for index-based access inside the parallel loop.
    std::map<face_descriptor,double> fareas;
    std::map<face_descriptor,Vector> fnormals;
    std::vector<double> fareas_vec;
    std::vector<Vector> fnormals_vec;
    std::vector<std::shared_ptr<Facet> > faces_vec;

    // calculate_normals_and_areas(P,fnormals,fareas);

    std::cout << "Calculating face areas...\n";
    for(face_descriptor fd: faces(P)){
      fareas[fd] = CGAL::Polygon_mesh_processing::face_area (fd,P);
    }

    // Debugging: report degenerate faces (zero or NaN area).
    std::cout << "Checking areas...\n";
    for (auto const& fd : fareas){
      if (fd.second == 0 || std::isnan(fd.second) )
      {
        std::cout << "Face " << fd.first->id() << " has area " << fd.second << std::endl;
      }
    }

    std::cout << "Calculating face normals...\n";
    CGAL::Polygon_mesh_processing::compute_face_normals(P,boost::make_assoc_property_map(fnormals));

    // Get boost face iterator (advanced in lockstep with the Polyhedron
    // facet iterator so descriptor-keyed maps can be read per facet).
    face_iterator fb_boost, fe_boost;
    boost::tie(fb_boost, fe_boost) = faces(P);

    // Convert to arrays, in facet-iteration order.
    for (Facet_iterator fb = P.facets_begin(); fb != P.facets_end(); fb++) {
      faces_vec.push_back(std::make_shared<Facet>(*fb));
      fareas_vec.push_back(fareas[*fb_boost]);
      fnormals_vec.push_back(fnormals[*fb_boost]);
      fb_boost++;
    }

    std::cout << "Creating charts from grid clusters...\n";

    // To create the chart vector:
    // create a vector of vectors, each chart has a vector of face ids;
    // then creation of each chart can be parallelised once the vectors are
    // created, using the Chart constructor / add_facet path below rather
    // than incremental merging.

    // Inverse index (K = chart id, V = face ids).
    std::map<uint32_t, std::vector<uint32_t> > faces_per_chart;
    for (auto face_entry : chart_id_map) {
      // face_entry: Key = face id, Value = chart id
      faces_per_chart[face_entry.second].push_back(face_entry.first);
    }
    // Convert to vector of vectors (sorted so faces are added in id order).
    std::vector<std::vector<uint32_t> > faces_per_chart_vector;
    for (auto& face_list_entry : faces_per_chart) {
      std::sort(face_list_entry.second.begin(), face_list_entry.second.end());
      faces_per_chart_vector.push_back(face_list_entry.second);
    }
    std::cout << "Found " << faces_per_chart_vector.size() << " charts in chart_id_map\n";

    /*
    if (faces_per_chart_vector.size() <= chart_threshold)
    {
      std::cout << "Chart threshold already reached\n";
      return faces_per_chart_vector.size();
    }
    */

    std::cout << "Compiling in to chart objects...\n";

    // Resize vector before parallelisation so each iteration writes its own
    // slot without synchronisation.
    charts.resize(faces_per_chart_vector.size());

    // For each chart: build it from its face list.
    #pragma omp parallel for
    for (uint32_t i = 0; i < faces_per_chart_vector.size(); ++i) {

      Chart chart_local;

      // Go through list of faces, build charts.
      std::vector<uint32_t>& face_list = faces_per_chart_vector[i];

      // Get begin iterators (previous iterator-advancing approach, retained
      // for reference):
      // uint32_t current_position = 0;
      // Facet_iterator fi = P.facets_begin();
      // face_iterator fb_boost, fe_boost;
      // boost::tie(fb_boost, fe_boost) = faces(P);

      for (uint32_t f = 0; f < face_list.size(); ++f) {
        uint32_t face_id = face_list[f];

        // uint32_t to_advance = face_id - current_position;
        // advance iterator by required number of steps
        // std::advance(fi, to_advance);
        // std::advance(fb_boost, to_advance);

        // Create chart from the first face, merge subsequent faces in.
        if (f == 0) {
          // chart_local = Chart(i,*fi, fnormals[*fb_boost], fareas[*fb_boost]);
          chart_local = Chart(i,faces_vec[face_id], fnormals_vec[face_id], fareas_vec[face_id]);
        }
        else {
          // chart_local.add_facet(*fi, fnormals[*fb_boost], fareas[*fb_boost]);
          chart_local.add_facet(faces_vec[face_id], fnormals_vec[face_id], fareas_vec[face_id]);
        }

        // current_position = face_id;
      }//for each face

      // To calculate perimeter and avg normal properly after bulk adding.
      chart_local.update_after_quick_merge();

      // charts.push_back(chart_local);
      charts[i] = chart_local;
    }//for each chart

    // Sort the charts in order of id (not needed: slot i holds chart id i).
    // std::sort(charts.begin(), charts.end(), Chart::sort_by_id);

    std::cout << "Created " << charts.size() << " charts from grid clusters" << std::endl;

    return charts.size();
  }

  // Create the initial join list from a chart vector: one JoinOperation per
  // unordered pair of adjacent charts. Adjacency is discovered by walking
  // every half-edge of every face of every chart and looking up the
  // neighbouring face's chart in chart_id_map.
  // Assumes chart ids are equal to their index in `charts`.
  static void create_join_list_from_chart_vector(std::vector<Chart> &charts,
                                                 std::list<JoinOperation> &joins,
                                                 CLUSTER_SETTINGS cluster_settings,
                                                 std::map<uint32_t, uint32_t> &chart_id_map){

    std::cout << "Creating joins from chart list...\n";
    std::set<uint32_t> processed_charts;   // charts already emitted (avoids pair duplication)
    std::set<uint32_t> chart_neighbours;   // unique neighbours of the current chart

    // For each chart...
    for (auto& chart : charts) {
      chart_neighbours.clear();
      // uint32_t this_chart_id = chart.id()

      // For each face in the chart, find neighbours and add them to the set.
      for (auto& face : chart.facets) {
        // For each edge of the face.
        Halfedge_facet_circulator fc = face->facet_begin();
        do {
          if (!fc->is_border() && !(fc->opposite()->is_border()) )// guard against no neighbour at this edge
          {
            // Get chart id of neighbour, add to set if it is not this chart.
            uint32_t nbr_face_id = fc->opposite()->facet()->id();
            uint32_t nbr_chart_id = chart_id_map[nbr_face_id];

            if (nbr_chart_id != chart.id){
              chart_neighbours.insert(nbr_chart_id);
            }
          }
        } while ( ++fc != face->facet_begin());
      }

      // std::cout << "found " << chart_neighbours.size() << " unique neighbours for chart " << chart.id << std::endl;

      // int added_joins = 0;

      // Create joins...
      // If a neighbour has not already been processed, create a join between
      // this chart and that neighbour (each pair is emitted exactly once).
      for (auto& nbr_chart_id : chart_neighbours) {
        // Make sure it hasn't been processed already.
        if (processed_charts.find(nbr_chart_id) == processed_charts.end()) {
          // Chart ids should be equal to their index in the vector at this point.
          JoinOperation join (chart.id, nbr_chart_id ,JoinOperation::cost_of_join(charts[chart.id],charts[nbr_chart_id], cluster_settings));
          joins.push_back(join);
          //TODO don't add duplicate joins - where charts share many edges
          // added_joins++;
        }
      }
      // std::cout << "Added " << added_joins << " joins\n";

      // Add this chart to the set of processed charts, so that it is not
      // considered for new joins.
      processed_charts.insert(chart.id);
    }
    std::cout << "Created " << joins.size() << " joins\n";
  }

  // Takes a polymesh and creates a list of Chart objects, one per face,
  // and a list of joins between all adjacent charts (one per interior edge).
  // cost_threshold / chart_threshold are currently unused: the clustering
  // call that consumed them (cluster_faces) is commented out below.
  // Returns the number of active charts written into chart_id_map.
  static uint32_t create_chart_clusters_from_faces (Polyhedron &P,
                                const double cost_threshold,
                                const uint32_t chart_threshold,
                                CLUSTER_SETTINGS cluster_settings,
                                std::map<uint32_t, uint32_t> &chart_id_map){

    // NOTE(review): `report` is written once and never emitted.
    std::stringstream report;
    report << "--------------------\nReport:\n----------------------\n";

    // Calculate areas of each face.
    std::cout << "Calculating face areas...\n";
    std::map<face_descriptor,double> fareas;
    for(face_descriptor fd: faces(P)){
      fareas[fd] = CGAL::Polygon_mesh_processing::face_area (fd,P);
    }
    // Calculate normals of each face.
    std::cout << "Calculating face normals...\n";
    std::map<face_descriptor,Vector> fnormals;
    CGAL::Polygon_mesh_processing::compute_face_normals(P,boost::make_assoc_property_map(fnormals));

    // Get boost face iterator (advanced in lockstep with the facet iterator).
    face_iterator fb_boost, fe_boost;
    boost::tie(fb_boost, fe_boost) = faces(P);

    // Each face begins as its own chart.
    std::cout << "Creating initial charts...\n";
    std::vector<Chart> charts;
    for ( Facet_iterator fb = P.facets_begin(); fb != P.facets_end(); ++fb){
      // Init chart instance for face; chart id == index in `charts`.
      Chart c(charts.size(),std::make_shared<Facet>(*fb), fnormals[*fb_boost], fareas[*fb_boost]);
      charts.push_back(c);

      fb_boost++;
    }

    // Create possible join list/queue. Each original edge in the mesh
    // becomes a join (if not a boundary edge).
    std::cout << "Creating initial joins...\n";
    std::list<JoinOperation> joins;
    std::list<JoinOperation>::iterator it;

    int edgecount = 0;
    for( Edge_iterator eb = P.edges_begin(), ee = P.edges_end(); eb != ee; ++ eb){
      edgecount++;

      // Only create a join if the halfedge is not a boundary edge.
      if ( !(eb->is_border()) && !(eb->opposite()->is_border()) )
      {
        uint32_t face1 = eb->facet()->id();
        uint32_t face2 = eb->opposite()->facet()->id();
        JoinOperation join (face1,face2,JoinOperation::cost_of_join(charts[face1],charts[face2], cluster_settings));
        joins.push_back(join);
      }
    }
    std::cout << joins.size() << " joins\n" << edgecount << " edges\n";

    // NOTE(review): merge step disabled.
    // cluster_faces(charts, joins, cost_threshold, chart_threshold, cluster_settings,chart_id_map);

    return populate_chart_LUT(charts, chart_id_map);
  }

  // Rebuild chart_id_map (face id -> chart id) from the chart vector,
  // keeping only active charts and compacting their ids to 0..k-1 in
  // vector order. Returns the number of active charts.
  static uint32_t populate_chart_LUT(std::vector<Chart> &charts, std::map<uint32_t, uint32_t> &chart_id_map){

    chart_id_map.clear();

    // Populate LUT for face-to-chart mapping,
    // counting active charts on the way to apply new compact chart ids.
    uint32_t active_charts = 0;
    for (uint32_t id = 0; id < charts.size(); ++id) {
      auto& chart = charts[id];
      if (chart.active) {
        for (auto& f : chart.facets) {
          chart_id_map[f->id()] = active_charts;
        }
        active_charts++;
      }
    }
    return active_charts;
  }

};
idasFoodWeb_bnd_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * Based on idaFoodWeb_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDAS: Food web problem. * * This example program (OpenMP version) uses the SUNBAND linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. 
In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDAS using the SUNBAND linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idasFoodWeb_bnd_omp * To specify the number of threads at the command line, use * % ./idasFoodWeb_bnd_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. 
* ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <idas/idas.h> #include <sunmatrix/sunmatrix_band.h> #include <sunlinsol/sunlinsol_band.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_direct.h> #include <sundials/sundials_types.h> #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to * species index is = 0, x-index ix = i, and y-index jy = j. 
*/ #define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX)) /* Type: UserData. Contains problem constants, etc. */ typedef struct { sunindextype Neq, ns, np, mx, my; realtype dx, dy, **acoef; realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; N_Vector rates; int nthreads; } *UserData; /* Prototypes for functions called by the IDA Solver. */ static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval, void *user_data); /* Prototypes for private Helper Functions. */ static void InitUserData(UserData webdata); static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata); static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol); static void PrintOutput(void *ida_mem, N_Vector c, realtype t); static void PrintFinalStats(void *ida_mem); static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata); static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata); static realtype dotprod(sunindextype size, realtype *x1, realtype *x2); static int check_retval(void *returnvalue, char *funcname, int opt); /* *-------------------------------------------------------------------- * MAIN PROGRAM *-------------------------------------------------------------------- */ int main(int argc, char *argv[]) { void *ida_mem; SUNMatrix A; SUNLinearSolver LS; UserData webdata; N_Vector cc, cp, id; int iout, retval; sunindextype mu, ml; realtype rtol, atol, t0, tout, tret; int num_threads; ida_mem = NULL; A = NULL; LS = NULL; webdata = NULL; cc = cp = id = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS enviroment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = (int) strtol(argv[1], NULL, 0); /* Allocate and initialize user data block webdata. 
*/ webdata = (UserData) malloc(sizeof *webdata); webdata->rates = N_VNew_OpenMP(NEQ, num_threads); webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES); webdata->nthreads = num_threads; InitUserData(webdata); /* Allocate N-vectors and initialize cc, cp, and id. */ cc = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1); cp = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1); id = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1); SetInitialProfiles(cc, cp, id, webdata); /* Set remaining inputs to IDAMalloc. */ t0 = ZERO; rtol = RTOL; atol = ATOL; /* Call IDACreate and IDAMalloc to initialize IDA. */ ida_mem = IDACreate(); if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1); retval = IDASetUserData(ida_mem, webdata); if(check_retval(&retval, "IDASetUserData", 1)) return(1); retval = IDASetId(ida_mem, id); if(check_retval(&retval, "IDASetId", 1)) return(1); retval = IDAInit(ida_mem, resweb, t0, cc, cp); if(check_retval(&retval, "IDAInit", 1)) return(1); retval = IDASStolerances(ida_mem, rtol, atol); if(check_retval(&retval, "IDASStolerances", 1)) return(1); /* Setup band matrix and linear solver, and attach to IDA. */ mu = ml = NSMX; A = SUNBandMatrix(NEQ, mu, ml); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); LS = SUNLinSol_Band(cc, A); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); retval = IDASetLinearSolver(ida_mem, LS, A); if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1); /* Call IDACalcIC (with default options) to correct the initial values. */ tout = RCONST(0.001); retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout); if(check_retval(&retval, "IDACalcIC", 1)) return(1); /* Print heading, basic parameters, and initial values. */ PrintHeader(mu, ml, rtol, atol); PrintOutput(ida_mem, cc, ZERO); /* Loop over iout, call IDASolve (normal mode), print selected output. 
*/ for (iout = 1; iout <= NOUT; iout++) { retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL); if(check_retval(&retval, "IDASolve", 1)) return(retval); PrintOutput(ida_mem, cc, tret); if (iout < 3) tout *= TMULT; else tout += TADD; } /* Print final statistics and free memory. */ PrintFinalStats(ida_mem); printf("num_threads = %i\n\n", num_threads); /* Free memory */ IDAFree(&ida_mem); SUNLinSolFree(LS); SUNMatDestroy(A); N_VDestroy_OpenMP(cc); N_VDestroy_OpenMP(cp); N_VDestroy_OpenMP(id); destroyMat(webdata->acoef); N_VDestroy_OpenMP(webdata->rates); free(webdata); return(0); } /* Define lines for readability in later routines */ #define acoef (webdata->acoef) #define bcoef (webdata->bcoef) #define cox (webdata->cox) #define coy (webdata->coy) /* *-------------------------------------------------------------------- * FUNCTIONS CALLED BY IDA *-------------------------------------------------------------------- */ /* * resweb: System residual function for predator-prey system. * This routine calls Fweb to get all the right-hand sides of the * equations, then loads the residual vector accordingly, * using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; jx = jy = is = 0; webdata = (UserData)user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* Loop over all grid points, setting residual values appropriately for differential or algebraic components. 
*/ #pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads) for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) resv[loc+is] = cpv[loc+is] - resv[loc+is]; else resv[loc+is] = -resv[loc+is]; } } } return(0); } /* *-------------------------------------------------------------------- * PRIVATE FUNCTIONS *-------------------------------------------------------------------- */ /* * InitUserData: Load problem constants in webdata (of type UserData). */ static void InitUserData(UserData webdata) { sunindextype i, j, np; realtype *a1,*a2, *a3, *a4, dx2, dy2; webdata->mx = MX; webdata->my = MY; webdata->ns = NUM_SPECIES; webdata->np = NPREY; webdata->dx = AX/(MX-1); webdata->dy = AY/(MY-1); webdata->Neq= NEQ; /* Set up the coefficients a and b, and others found in the equations. */ np = webdata->np; dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy); for (i = 0; i < np; i++) { a1 = &(acoef[i][np]); a2 = &(acoef[i+np][0]); a3 = &(acoef[i][0]); a4 = &(acoef[i+np][np]); /* Fill in the portion of acoef in the four quadrants, row by row. */ for (j = 0; j < np; j++) { *a1++ = -GG; *a2++ = EE; *a3++ = ZERO; *a4++ = ZERO; } /* Reset the diagonal elements of acoef to -AA. */ acoef[i][i] = -AA; acoef[i+np][i+np] = -AA; /* Set coefficients for b and diffusion terms. */ bcoef[i] = BB; bcoef[i+np] = -BB; cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2; coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2; } } /* * SetInitialProfiles: Set initial conditions in cc, cp, and id. * A polynomial profile is used for the prey cc values, and a constant * (1.0e5) is loaded as the initial guess for the predator cc values. * The id values are set to 1 for the prey and 0 for the predators. * The prey cp values are set according to the given system, and * the predator cp values are set to zero. 
*/ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES*jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor; idv[loc+is] = ONE; } else { ccv[loc+is] = RCONST(1.0e5); idv[loc+is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc+is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol) { printf("\nidasFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDAS \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n", (long int) mu, (long int) ml); printf("CalcIC called to correct initial predator concentrations.\n\n"); printf("-----------------------------------------------------------\n"); printf(" t bottom-left top-right"); printf(" 
| nst k h\n"); printf("-----------------------------------------------------------\n\n"); } /* * PrintOutput: Print output values at output time t = tt. * Selected run statistics are printed. Then values of the concentrations * are printed for the bottom left and top right grid points only. */ static void PrintOutput(void *ida_mem, N_Vector c, realtype t) { int i, kused, retval; long int nst; realtype *c_bl, *c_tr, hused; retval = IDAGetLastOrder(ida_mem, &kused); check_retval(&retval, "IDAGetLastOrder", 1); retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetLastStep(ida_mem, &hused); check_retval(&retval, "IDAGetLastStep", 1); c_bl = IJ_Vptr(c,0,0); c_tr = IJ_Vptr(c,MX-1,MY-1); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #else printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #endif printf("\n"); } /* * PrintFinalStats: Print final run data contained in iopt. 
*/ static void PrintFinalStats(void *ida_mem) { long int nst, nre, nreLS, nni, nje, netf, ncfn; int retval; retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetNumNonlinSolvIters(ida_mem, &nni); check_retval(&retval, "IDAGetNumNonlinSolvIters", 1); retval = IDAGetNumResEvals(ida_mem, &nre); check_retval(&retval, "IDAGetNumResEvals", 1); retval = IDAGetNumErrTestFails(ida_mem, &netf); check_retval(&retval, "IDAGetNumErrTestFails", 1); retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn); check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1); retval = IDAGetNumJacEvals(ida_mem, &nje); check_retval(&retval, "IDAGetNumJacEvals", 1); retval = IDAGetNumLinResEvals(ida_mem, &nreLS); check_retval(&retval, "IDAGetNumLinResEvals", 1); printf("-----------------------------------------------------------\n"); printf("Final run statistics: \n\n"); printf("Number of steps = %ld\n", nst); printf("Number of residual evaluations = %ld\n", nre+nreLS); printf("Number of Jacobian evaluations = %ld\n", nje); printf("Number of nonlinear iterations = %ld\n", nni); printf("Number of error test failures = %ld\n", netf); printf("Number of nonlinear conv. failures = %ld\n", ncfn); } /* * Fweb: Rate function for the food-web problem. * This routine computes the right-hand sides of the system equations, * consisting of the diffusion term and interaction term. * The interaction term is computed by the function WebRates. */ static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata) { sunindextype jx, jy, is, idyu, idyl, idxu, idxl; realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui; /* Loop over grid points, evaluate interaction vector (length ns), form diffusion difference terms, and load crate. */ jx = jy = is = 0; for (jy = 0; jy < MY; jy++) { yy = (webdata->dy) * jy ; idyu = (jy!=MY-1) ? NSMX : -NSMX; idyl = (jy!= 0 ) ? 
NSMX : -NSMX; for (jx = 0; jx < MX; jx++) { xx = (webdata->dx) * jx; idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES; idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES; cxy = IJ_Vptr(cc,jx,jy); ratesxy = IJ_Vptr(webdata->rates,jx,jy); cratexy = IJ_Vptr(crate,jx,jy); /* Get interaction vector at this grid point. */ WebRates(xx, yy, cxy, ratesxy, webdata); /* Loop over species, do differencing, load crate segment. */ #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads) for (is = 0; is < NUM_SPECIES; is++) { /* Differencing in y. */ dcyli = *(cxy+is) - *(cxy - idyl + is) ; dcyui = *(cxy + idyu + is) - *(cxy+is); /* Differencing in x. */ dcxli = *(cxy+is) - *(cxy - idxl + is); dcxui = *(cxy + idxu +is) - *(cxy+is); /* Compute the crate values at (xx,yy). */ cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) + ratesxy[is]; } /* End is loop */ } /* End of jx loop */ } /* End of jy loop */ } /* * WebRates: Evaluate reaction rates at a given spatial point. * At a given (x,y), evaluate the array of ns reaction terms R. */ static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata) { int is; realtype fac; for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]); fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy); for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] ); } /* * dotprod: dot product routine for realtype arrays, for use by WebRates. */ static realtype dotprod(sunindextype size, realtype *x1, realtype *x2) { sunindextype i; realtype *xx1, *xx2, temp = ZERO; xx1 = x1; xx2 = x2; for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++); return(temp); } /* * Check function return value... 
/*
 * check_retval: Verify the outcome of a SUNDIALS (or allocator) call.
 *
 * opt == 0 means SUNDIALS function allocates memory so check if
 *          returned NULL pointer
 * opt == 1 means SUNDIALS function returns an integer value so check if
 *          retval < 0
 * opt == 2 means function allocates memory so check if returned
 *          NULL pointer
 *
 * Returns 1 (and prints to stderr) when the check fails, 0 otherwise.
 */

static int check_retval(void *returnvalue, char *funcname, int opt)
{
  /* Dispatch on the requested check mode. */
  switch (opt) {

  case 0: /* SUNDIALS allocator: NULL return means no memory obtained */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;

  case 1: { /* SUNDIALS routine returning an int: negative means failure */
    int *errvalue = (int *) returnvalue;
    if (*errvalue < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *errvalue);
      return(1);
    }
    break;
  }

  case 2: /* generic allocator: NULL return means no memory obtained */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;

  default: /* unknown mode: treat as success, matching original behavior */
    break;
  }

  return(0);
}
edge_data.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Antonia Larese // #if !defined(KRATOS_EDGE_DATA_H_INCLUDED ) #define KRATOS_EDGE_DATA_H_INCLUDED //we suggest defining the following macro #define USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION //we suggest defining the following macro #define USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" //#include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "free_surface_application.h" #include "utilities/openmp_utils.h" namespace Kratos { // template<unsigned int TDim> // class EdgeConstructionScratch // { // public: // array_1d<double, TDim+1> N; // boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim> dN_dx; // double volume; // double weighting_factor = 1.0 / static_cast<double>(TDim+1); // boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> mass_consistent; // array_1d<double, TDim+1> mass_lumped; // array_1d<unsigned int, TDim+1> nodal_indices; // array_1d<double, TDim+1> heights; // // } //structure definition for fast access to edge data using CSR format template<unsigned int TDim> class EdgesStructureType { public: //component ij of the consistent mass matrix (M = Ni * Nj * dOmega) double Mass; //components kl of the laplacian matrix of edge ij (L = dNi/dxk * dNj/dxl * dOmega) //double Laplacian; boost::numeric::ublas::bounded_matrix<double, TDim, TDim> LaplacianIJ; //components k of the gradient matrix of edge ij (G = Ni * dNj/dxl * dOmega) array_1d<double, TDim> Ni_DNj; //components k of the transposed gradient matrix of edge ij (GT = dNi/dxl * Nj * dOmega) //TRANSPOSED GRADIENT array_1d<double, TDim> DNi_Nj; 
//************************************************************************************* //************************************************************************************* //gradient integrated by parts //RHSi += DNi_Nj pj + Aboundary * pext ==> RHS += Ni_DNj p_j - DNi_Nj p_i //ATTENTION: + Aboundary * pext is NOT included!! it should be included "manually" inline void Add_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i; } inline void Sub_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination[comp] += Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i; } //************************************************************************************* //************************************************************************************* //gradient //RHSi += Ni_DNj[k]*v[k] inline void Add_D_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination += Ni_DNj[comp] * (v_j[comp] - v_i[comp]); } inline void Sub_D_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination -= Ni_DNj[comp] * (v_j[comp] - v_i[comp]); } //************************************************************************************* //************************************************************************************* //gradient //RHSi += Ni_DNj pj inline void Add_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination[comp] += Ni_DNj[comp] * (p_j - p_i); } inline void Sub_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j) { for (unsigned int comp = 0; comp < TDim; comp++) 
destination[comp] -= Ni_DNj[comp] * (p_j - p_i); } //************************************************************************************* //************************************************************************************* //gradient //RHSi += DNi_Nj[k]*v[k] inline void Add_div_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination -= Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp]; } inline void Sub_div_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination += Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp]; } //************************************************************************************* //************************************************************************************* //gets the trace of the laplacian matrix inline void CalculateScalarLaplacian(double& l_ij) { l_ij = LaplacianIJ(0, 0); for (unsigned int comp = 1; comp < TDim; comp++) l_ij += LaplacianIJ(comp, comp); } inline void Add_ConvectiveContribution(array_1d<double, TDim>& destination, const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i, const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION double temp = a_i[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] += temp * (U_j[l_comp] - U_i[l_comp]); #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] += aux_j * U_j[l_comp] - aux_i * U_i[l_comp]; #endif } inline void 
Sub_ConvectiveContribution(array_1d<double, TDim>& destination, const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i, const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION double temp = a_i[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] -= temp * (U_j[l_comp] - U_i[l_comp]); #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] -= aux_j * U_j[l_comp] - aux_i * U_i[l_comp]; #endif } inline void Sub_ConvectiveContribution(double& destination, const array_1d<double, TDim>& a_i, const double& phi_i, const array_1d<double, TDim>& a_j, const double& phi_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION double temp = a_i[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; destination -= temp * (phi_j - phi_i); #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } destination -= aux_j * phi_j - aux_i * phi_i; #endif } inline void Add_ConvectiveContribution(double& destination, const array_1d<double, TDim>& a_i, const double& phi_i, const array_1d<double, TDim>& a_j, const double& phi_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION double temp = a_i[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; destination += temp * (phi_j - phi_i); #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; 
k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } destination += aux_j * phi_j - aux_i * phi_i; #endif } //************************************************************************************* //************************************************************************************* inline void CalculateConvectionStabilization_LOW(array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i, const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j) { double conv_stab = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) for (unsigned int m_comp = 0; m_comp < TDim; m_comp++) conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp); for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]); // double temp = 0.0; // double lij = 0.0; // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) // { // lij += LaplacianIJ(k_comp,k_comp); // temp = a_i[k_comp] * a_i[k_comp]; // } // // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // stab_low[l_comp] = temp * lij * (U_j[l_comp] - U_i[l_comp]); } // inline void CalculateConvectionStabilization_LOW( array_1d<double,TDim>& stab_low, // const array_1d<double,TDim>& a_i, const array_1d<double,TDim>& U_i, const double& p_i, // const array_1d<double,TDim>& a_j, const array_1d<double,TDim>& U_j, const double& p_j // ) // { // double conv_stab = 0.0; // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) // for (unsigned int m_comp = 0; m_comp < TDim; m_comp++) // conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp,m_comp); // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]); // //// adding pressure // double press_diff = p_j-p_i; // for (unsigned int j_comp = 0; j_comp < TDim; j_comp++) // { // for (unsigned int i_comp = 0; i_comp < TDim; i_comp++) // 
stab_low[j_comp] -= a_i[i_comp] * LaplacianIJ(i_comp,j_comp) * press_diff ; // } // // // } inline void CalculateConvectionStabilization_LOW(double& stab_low, const array_1d<double, TDim>& a_i, const double& phi_i, const array_1d<double, TDim>& a_j, const double& phi_j) { double conv_stab = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) for (unsigned int m_comp = 0; m_comp < TDim; m_comp++) conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp); stab_low = conv_stab * (phi_j - phi_i); } //************************************************************************************* //************************************************************************************* inline void CalculateConvectionStabilization_HIGH(array_1d<double, TDim>& stab_high, const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& pi_i, const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& pi_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION double temp = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) stab_high[l_comp] = -temp * (pi_j[l_comp] - pi_i[l_comp]); //check if the minus sign is correct // double temp_i = 0.0; // double temp_j = 0.0; // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) // { // temp_j += a_i[k_comp] * Ni_DNj[k_comp]; // temp_i += a_i[k_comp] * DNi_Nj[k_comp]; // } // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // stab_high[l_comp] = +(temp_j*pi_j[l_comp] - temp_i*pi_i[l_comp]); //check if the minus sign is correct // double temp_i = 0.0; // double temp_j = 0.0; // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) // { // temp_i += a_i[k_comp] * Ni_DNj[k_comp]; // temp_j += a_i[k_comp] * DNi_Nj[k_comp]; // } // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // stab_high[l_comp] = (temp_j*pi_j[l_comp] + temp_i*pi_i[l_comp]); //check if the minus sign is correct #else double aux_i = a_i[0] * 
Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) stab_high[l_comp] = -(aux_j * pi_j[l_comp] - aux_i * pi_i[l_comp]); #endif } inline void CalculateConvectionStabilization_HIGH(double& stab_high, const array_1d<double, TDim>& a_i, const double& pi_i, const array_1d<double, TDim>& a_j, const double& pi_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION double temp = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; stab_high = -temp * (pi_j - pi_i); //check if the minus sign is correct #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } stab_high = -(aux_j * pi_j - aux_i * pi_i); #endif } //************************************************************************************* //************************************************************************************* inline void Add_StabContribution(array_1d<double, TDim>& destination, const double tau, const double beta, const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high) { for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] += tau * (stab_low[l_comp] - beta * stab_high[l_comp]); } inline void Add_StabContribution(double& destination, const double tau, const double beta, const double& stab_low, const double& stab_high) { destination += tau * (stab_low - beta * stab_high); } inline void Sub_StabContribution(array_1d<double, TDim>& destination, const double tau, const double beta, const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high) { for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] -= tau * (stab_low[l_comp] - beta * 
stab_high[l_comp]); } inline void Sub_StabContribution(double& destination, const double tau, const double beta, const double& stab_low, const double& stab_high) { destination -= tau * (stab_low - beta * stab_high); } //************************************************************************************* //************************************************************************************* inline void Add_ViscousContribution(array_1d<double, TDim>& destination, const array_1d<double, TDim>& U_i, const double& nu_i, const array_1d<double, TDim>& U_j, const double& nu_j) { //calculate scalar laplacian double L = 0.0; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) L += LaplacianIJ(l_comp, l_comp); //double nu_avg = 0.5*(nu_i+nu_j); for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] += nu_i * L * (U_j[l_comp] - U_i[l_comp]); } inline void Sub_ViscousContribution(array_1d<double, TDim>& destination, const array_1d<double, TDim>& U_i, const double& nu_i, const array_1d<double, TDim>& U_j, const double& nu_j) { //calculate scalar laplacian double L = 0.0; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) L += LaplacianIJ(l_comp, l_comp); //double nu_avg = 0.5*(nu_i+nu_j); for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] -= nu_i * L * (U_j[l_comp] - U_i[l_comp]); } }; //class definition of matrices using CSR format template<unsigned int TDim, class TSparseSpace> class MatrixContainer { public: //name for the self defined structure typedef EdgesStructureType<TDim> CSR_Tuple; typedef vector<CSR_Tuple> EdgesVectorType; //name for row start and column index vectors typedef vector<unsigned int> IndicesVectorType; //names for separately stored node based values typedef vector<double> ValuesVectorType; // typedef std::vector< array_1d<double,TDim> > CalcVectorType; typedef vector< array_1d<double, TDim> > CalcVectorType; //constructor and destructor MatrixContainer() { }; ~MatrixContainer() { }; //functions to return 
//private values
inline unsigned int GetNumberEdges()
{
    return mNumberEdges;
}
inline EdgesVectorType& GetEdgeValues()
{
    return mNonzeroEdgeValues;
}
inline IndicesVectorType& GetColumnIndex()
{
    return mColumnIndex;
}
inline IndicesVectorType& GetRowStartIndex()
{
    return mRowStartIndex;
}
inline ValuesVectorType& GetLumpedMass()
{
    return mLumpedMassMatrix;
}
inline ValuesVectorType& GetInvertedMass()
{
    return mInvertedMassMatrix;
}
inline CalcVectorType& GetDiagGradient()
{
    return mDiagGradientMatrix;
}
inline ValuesVectorType& GetHmin()
{
    return mHmin;
}

//********************************************************
//function to size and initialize the vector of CSR tuples.
//Counts the edges from the NEIGHBOUR_NODES lists, assigns a consecutive
//AUX_INDEX to every node, allocates the CSR arrays and zero-initializes
//every edge tuple. Must be called before BuildCSRData.
void ConstructCSRVector(ModelPart& model_part)
{
    KRATOS_TRY

    //SIZE OF CSR VECTOR
    //defining the number of nodes and edges
    int n_nodes = model_part.Nodes().size();
    //remark: no colouring algorithm is used here (symmetry is neglected)
    //respectively edge ij is considered different from edge ji
    mNumberEdges = 0;
    //counter to assign and get global nodal index
    int i_node = 0;

    //counting the edges connecting the nodes
    for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); node_it++)
    {
        //counting neighbours of each node
        mNumberEdges += (node_it->GetValue(NEIGHBOUR_NODES)).size();
        //DIAGONAL TERMS
        //mNumberEdges++;

        //assigning global index to each node
        node_it->FastGetSolutionStepValue(AUX_INDEX) = static_cast<double> (i_node++);
    }
    //error message in case number of nodes does not coincide with number of indices
    if (i_node != n_nodes)
        KRATOS_WATCH("ERROR - Highest nodal index doesn't coincide with number of nodes!");

    //allocating memory for block of CSR data - setting to zero for first-touch OpenMP allocation
    mNonzeroEdgeValues.resize(mNumberEdges);
    //SetToZero(mNonzeroEdgeValues);
    mColumnIndex.resize(mNumberEdges);
    //SetToZero(mColumnIndex);
    mRowStartIndex.resize(n_nodes + 1);
    //SetToZero(mRowStartIndex);
    mLumpedMassMatrix.resize(n_nodes);
    SetToZero(mLumpedMassMatrix);
    mInvertedMassMatrix.resize(n_nodes);
    SetToZero(mInvertedMassMatrix);
    mDiagGradientMatrix.resize(n_nodes);
    SetToZero(mDiagGradientMatrix);
    mHmin.resize(n_nodes);
    SetToZero(mHmin);

    //INITIALIZING OF THE CSR VECTOR
    //temporary variable as the row start index of a node depends on the number of neighbours of the previous one
    unsigned int row_start_temp = 0;

    int number_of_threads = OpenMPUtils::GetNumThreads();
    std::vector<int> row_partition(number_of_threads);
    OpenMPUtils::DivideInPartitions(model_part.Nodes().size(), number_of_threads, row_partition);

    //NOTE(review): the outer serial loop over k combined with the "if" inside
    //the parallel region makes exactly one thread work per iteration, so the
    //shared row_start_temp/i_node are updated in node order; presumably this
    //pattern exists for first-touch page placement - confirm before changing.
    for (int k = 0; k < number_of_threads; k++)
    {
        #pragma omp parallel
        if (OpenMPUtils::ThisThread() == k)
        {
            for (unsigned int aux_i = static_cast<unsigned int> (row_partition[k]); aux_i < static_cast<unsigned int> (row_partition[k + 1]); aux_i++)
            {
                typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin() + aux_i;

                //main loop over all nodes: getting the global index of the node
                i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));

                //determining its neighbours
                WeakPointerVector< Node < 3 > >& neighb_nodes = node_it->GetValue(NEIGHBOUR_NODES);
                //number of neighbours of node i determines row start index for the following node
                unsigned int n_neighbours = neighb_nodes.size();
                //DIAGONAL TERMS
                //n_neighbours++;

                //reserving memory for work array
                std::vector<unsigned int> work_array;
                work_array.reserve(n_neighbours);
                //DIAGONAL TERMS
                //work_array.push_back(i_node);

                //nested loop over the neighbouring nodes
                for (WeakPointerVector< Node < 3 > >::iterator neighb_it = neighb_nodes.begin(); neighb_it != neighb_nodes.end(); neighb_it++)
                {
                    //getting global index of the neighbouring node
                    work_array.push_back(static_cast<unsigned int> (neighb_it->FastGetSolutionStepValue(AUX_INDEX)));
                }
                //reordering neighbours following their global indices
                std::sort(work_array.begin(), work_array.end());

                //setting current row start index
                mRowStartIndex[i_node] = row_start_temp;
                //nested loop over the by now ordered neighbours
                for (unsigned int counter = 0; counter < n_neighbours; counter++)
                {
                    //getting global index of the neighbouring node
                    unsigned int j_neighbour = work_array[counter];
                    //calculating CSR index
                    unsigned int csr_index = mRowStartIndex[i_node] + counter;
                    //saving column index j of the original matrix
                    mColumnIndex[csr_index] = j_neighbour;
                    //initializing the CSR vector entries with zero
                    mNonzeroEdgeValues[csr_index].Mass = 0.0;
                    noalias(mNonzeroEdgeValues[csr_index].LaplacianIJ) = ZeroMatrix(TDim, TDim);
                    noalias(mNonzeroEdgeValues[csr_index].Ni_DNj) = ZeroVector(TDim);
                    //TRANSPOSED GRADIENT
                    noalias(mNonzeroEdgeValues[csr_index].DNi_Nj) = ZeroVector(TDim);
                }
                //preparing row start index for next node
                row_start_temp += n_neighbours;
            }
        }
    }
    //adding last entry (necessary for abort criterion of loops)
    mRowStartIndex[n_nodes] = mNumberEdges;

    //INITIALIZING NODE BASED VALUES
    #pragma omp parallel for
    //set the heights to a huge number so BuildCSRData can minimize into them
    for (int i_node = 0; i_node < n_nodes; i_node++)
        mHmin[i_node] = 1e10;

    KRATOS_CATCH("")
}

//*********************************
//function to precalculate CSR data.
//Loops over all elements and accumulates the edge mass, laplacian,
//gradient and transposed gradient integrals, the lumped mass matrix,
//the diagonal of the gradient matrix and the minimal nodal height mHmin;
//finally builds the inverted lumped mass matrix.
void BuildCSRData(ModelPart& model_part)
{
    KRATOS_TRY

    //PRECALCULATING CSR DATA
    //defining temporary local variables for elementwise addition
    //shape functions
    array_1d<double, TDim + 1 > N;
    //shape function derivatives
    boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim> dN_dx;
    //volume
    double volume;
    //weighting factor
    double weighting_factor = 1.0 / static_cast<double> (TDim + 1);
    //elemental matrices
    boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim + 1 > mass_consistent;
    array_1d<double, TDim + 1 > mass_lumped;
    //global indices of elemental nodes
    array_1d<unsigned int, TDim + 1 > nodal_indices;
    array_1d<double, TDim + 1 > heights;

    //loop over all elements
    for (typename ModelPart::ElementsContainerType::iterator elem_it = model_part.ElementsBegin(); elem_it != model_part.ElementsEnd(); elem_it++)
    {
        //LOCAL ELEMENTWISE CALCULATIONS
        //getting geometry data of the element
        GeometryUtils::CalculateGeometryData(elem_it->GetGeometry(), dN_dx, N, volume);

        //calculate lenght of the heights of the element
        //(height_i = 1 / |grad N_i|)
        for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
        {
            heights[ie_node] = dN_dx(ie_node, 0) * dN_dx(ie_node, 0);
            for (unsigned int comp = 1; comp < TDim; comp++)
            {
                heights[ie_node] += dN_dx(ie_node, comp) * dN_dx(ie_node, comp);
            }
            heights[ie_node] = 1.0 / sqrt(heights[ie_node]);
        }

        //setting up elemental mass matrices
        CalculateMassMatrix(mass_consistent, volume);
        noalias(mass_lumped) = ZeroVector(TDim + 1);
        for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
        {
            for (unsigned int je_node = 0; je_node <= TDim; je_node++)
            {
                //lumping by row sum of the consistent mass matrix
                mass_lumped[ie_node] += mass_consistent(ie_node, je_node);
            }
        }

        //(corresponding to Ni * dOmega respectively Nj * dOmega)
        double weighted_volume = volume * weighting_factor;

        //ASSEMBLING GLOBAL DATA STRUCTURE
        //loop over the nodes of the element to determine their global indices
        for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            nodal_indices[ie_node] = static_cast<unsigned int> (elem_it->GetGeometry()[ie_node].FastGetSolutionStepValue(AUX_INDEX));

        //assembling global "edge matrices" by adding local contributions
        for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
        {
            //check the heights and change the value if minimal is found
            if (mHmin[ nodal_indices[ie_node] ] > heights[ie_node])
                mHmin[ nodal_indices[ie_node] ] = heights[ie_node];

            for (unsigned int je_node = 0; je_node <= TDim; je_node++)
            {
                //remark: there is no edge linking node i with itself!
                //DIAGONAL TERMS
                if (ie_node != je_node)
                {
                    //calculating CSR index from global index
                    unsigned int csr_index = GetCSRIndex(nodal_indices[ie_node], nodal_indices[je_node]);

                    //assigning precalculated element data to the referring edges
                    //contribution to edge mass
                    mNonzeroEdgeValues[csr_index].Mass += mass_consistent(ie_node, je_node);

                    //contribution to edge laplacian
                    boost::numeric::ublas::bounded_matrix <double, TDim, TDim>& laplacian = mNonzeroEdgeValues[csr_index].LaplacianIJ;
                    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                        for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
                            laplacian(l_comp, k_comp) += dN_dx(ie_node, l_comp) * dN_dx(je_node, k_comp) * volume;

                    //contribution to edge gradient
                    array_1d<double, TDim>& gradient = mNonzeroEdgeValues[csr_index].Ni_DNj;
                    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                        gradient[l_comp] += dN_dx(je_node, l_comp) * weighted_volume;

                    //TRANSPOSED GRADIENT
                    //contribution to transposed edge gradient
                    array_1d<double, TDim>& transp_gradient = mNonzeroEdgeValues[csr_index].DNi_Nj;
                    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                        transp_gradient[l_comp] += dN_dx(ie_node, l_comp) * weighted_volume;
                }
            }
        }

        //assembling node based vectors
        for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            //diagonal of the global lumped mass matrix
            mLumpedMassMatrix[nodal_indices[ie_node]] += mass_lumped[ie_node];
        for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
        {
            //diagonal of the global gradient matrix
            array_1d<double, TDim>& gradient = mDiagGradientMatrix[nodal_indices[ie_node]];
            for (unsigned int component = 0; component < TDim; component++)
                gradient[component] += dN_dx(ie_node, component) * weighted_volume;
        }
    }

    //copy mass matrix to inverted mass matrix
    for (unsigned int inode = 0; inode < mLumpedMassMatrix.size(); inode++)
    {
        mInvertedMassMatrix[inode] = mLumpedMassMatrix[inode];
    }

    //perform MPI syncronization between the domains
    //calculating inverted mass matrix (this requires syncronization for MPI paraellelism)
    for (unsigned int inode = 0; inode < mInvertedMassMatrix.size(); inode++)
    {
        mInvertedMassMatrix[inode] = 1.0 / mInvertedMassMatrix[inode];
    }

    KRATOS_CATCH("")
}

//******************************************
//function to calculate CSR index of edge ij.
//NOTE: if NeighbourJ is not a neighbour of NodeI the loop runs off the
//row and returns the past-the-end index of the row - callers must pass
//a valid edge.
unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
{
    KRATOS_TRY

    //index indicating data position of edge ij
    unsigned int csr_index;
    //searching for coincidence of stored column index and neighbour index j
    for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
        if (mColumnIndex[csr_index] == NeighbourJ)
            break;

    //returning CSR index of edge ij
    return csr_index;

    KRATOS_CATCH("")
}

//***********************************************
//function to get pointer to CSR tuple of edge ij
CSR_Tuple*
GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
{
    KRATOS_TRY

    //index indicating data position of edge ij
    unsigned int csr_index;
    //searching for coincidence of stored column index and neighbour index j
    for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
        if (mColumnIndex[csr_index] == NeighbourJ)
            break;

    //returning pointer to CSR tuple of edge ij
    return &mNonzeroEdgeValues[csr_index];

    KRATOS_CATCH("")
}

//*******************************
//function to free dynamic memory
void Clear()
{
    KRATOS_TRY
    mNonzeroEdgeValues.clear();
    mColumnIndex.clear();
    mRowStartIndex.clear();
    mInvertedMassMatrix.clear();
    mLumpedMassMatrix.clear();
    mDiagGradientMatrix.clear();
    mHmin.clear();
    KRATOS_CATCH("")
}

//****************************
//functions to access database
//(note that this is already thought for parallel;
// for a single processor this could be done in a faster way)
//NOTE(review): all Fill*/Write* helpers below index the destination with
//i_node = i, i.e. they assume the container position coincides with the
//node's AUX_INDEX (the AUX_INDEX lookup is commented out) - confirm that
//nodes are stored in AUX_INDEX order before relying on this.
void FillCoordinatesFromDatabase(CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY

    //loop over all nodes
    int n_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();

    #pragma omp parallel for firstprivate(n_nodes, it_begin)
    for (int i = 0; i < n_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = it_begin + i;
        //get the global index of node i
        // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
        unsigned int i_node = i;
        //save value in the destination vector
        for (unsigned int component = 0; component < TDim; component++)
            (rDestination[i_node])[component] = (*node_it)[component];
    }
    KRATOS_CATCH("");
}

//copy the current value of a vector variable into rDestination
void FillVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY

    //loop over all nodes
    int n_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
    unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

    #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
    for (int i = 0; i < n_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = it_begin + i;
        //get the global index of node i
        // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
        unsigned int i_node = i;
        //get the requested value in vector form
        array_1d<double, 3 > & vector = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
        //save value in the destination vector
        for (unsigned int component = 0; component < TDim; component++)
            (rDestination[i_node])[component] = vector[component];
    }
    KRATOS_CATCH("");
}

//copy the value of a vector variable at the PREVIOUS time step (buffer index 1)
void FillOldVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY

    //loop over all nodes
    int n_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
    unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

    #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
    for (int i = 0; i < n_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = it_begin + i;
        //get the global index of node i
        // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
        unsigned int i_node = i;
        //get the requested value in vector form
        array_1d<double, 3 > & vector = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
        //save value in the destination vector
        for (unsigned int component = 0; component < TDim; component++)
            (rDestination[i_node])[component] = vector[component];
    }
    KRATOS_CATCH("");
}

//copy the current value of a scalar variable into rDestination
void FillScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY

    //loop over all nodes
    int n_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
    unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

    #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
    for (int i = 0; i < n_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = it_begin + i;
        //get the global index of node i
        // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
        unsigned int i_node = i;
        //get the requested scalar value
        double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
        //save value in the destination vector
        rDestination[i_node] = scalar;
    }
    KRATOS_CATCH("");
}

//copy the value of a scalar variable at the PREVIOUS time step (buffer index 1)
void FillOldScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY

    int n_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
    unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

    #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
    for (int i = 0; i < n_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = it_begin + i;
        //get the global index of node i
        // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
        unsigned int i_node = i;
        //get the requested scalar value
        double& scalar = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
        //save value in the destination vector
        rDestination[i_node] = scalar;
    }
    KRATOS_CATCH("");
}

//write a nodal vector array back into the current step of the database
void WriteVectorToDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY

    //loop over all nodes
    int n_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
    unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

    #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
    for (int i = 0; i < n_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = it_begin + i;
        //get the global index of node i
        // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
        unsigned int i_node = i;
        //get reference of destination
        array_1d<double, 3 > & vector = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
        //save vector in database
        for (unsigned int component = 0; component < TDim; component++)
            vector[component] = (rOrigin[i_node])[component];
    }
    KRATOS_CATCH("");
}

//write a nodal scalar array back into the current step of the database
void WriteScalarToDatabase(Variable<double>& rVariable, ValuesVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
{
    KRATOS_TRY

    //loop over all nodes
    int n_nodes = rNodes.size();
    ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
    unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

    #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
    for (int i = 0; i < n_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator node_it = it_begin + i;
        //get the global index of node i
        // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
        int i_node = i;
        //get reference of destination
        double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
        //save scalar in database
        scalar = rOrigin[i_node];
    }
    KRATOS_CATCH("");
}

//*********************************************************************
//destination = origin1 + value * Minv*origin (componentwise, per node)
void Add_Minv_value(
    CalcVectorType& destination,
    const CalcVectorType& origin1,
    const double value,
    const ValuesVectorType& Minv_vec,
    const CalcVectorType& origin
)
{
    KRATOS_TRY

    int loop_size = destination.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        array_1d<double, TDim>& dest = destination[i_node];
        const double m_inv = Minv_vec[i_node];
        const array_1d<double, TDim>& origin_vec1 = origin1[i_node];
        const array_1d<double, TDim>& origin_value = origin[i_node];

        double temp = value * m_inv;
        for (unsigned int comp = 0; comp < TDim; comp++)
            dest[comp] = origin_vec1[comp] + temp * origin_value[comp];
    }

    KRATOS_CATCH("")
}

//scalar overload: destination = origin1 + value * Minv*origin
void Add_Minv_value(
    ValuesVectorType& destination,
    const ValuesVectorType& origin1,
    const double value,
    const ValuesVectorType& Minv_vec,
    const ValuesVectorType& origin
)
{
    KRATOS_TRY

    int loop_size = destination.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        double& dest = destination[i_node];
        const double m_inv = Minv_vec[i_node];
        const double& origin_vec1 = origin1[i_node];
        const double& origin_value = origin[i_node];

        double temp = value * m_inv;
        dest = origin_vec1 + temp * origin_value;
    }

    KRATOS_CATCH("")
}

//**********************************************************************
//resize a nodal vector container and zero it in parallel (first touch)
void AllocateAndSetToZero(CalcVectorType& data_vector, int size)
{
    data_vector.resize(size);
    int loop_size = size;
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        array_1d<double, TDim>& aaa = data_vector[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            aaa[comp] = 0.0;
    }
}

//resize a nodal scalar container and zero it in parallel (first touch)
void AllocateAndSetToZero(ValuesVectorType& data_vector, int size)
{
    data_vector.resize(size);
    int loop_size = size;
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        data_vector[i_node] = 0.0;
        ;
    }
}

//**********************************************************************
//zero an already-sized nodal vector container in parallel
void SetToZero(CalcVectorType& data_vector)
{
    int loop_size = data_vector.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        array_1d<double, TDim>& aaa = data_vector[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            aaa[comp] = 0.0;
    }
}

//zero an already-sized nodal scalar container in parallel
void SetToZero(ValuesVectorType& data_vector)
{
    int loop_size = data_vector.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        data_vector[i_node] = 0.0;
        ;
    }
}

//**********************************************************************
//elementwise copy origin -> destination (destination must be presized)
void AssignVectorToVector(const CalcVectorType& origin,
                          CalcVectorType& destination
                         )
{
    int loop_size = origin.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        const array_1d<double, TDim>& orig = origin[i_node];
        array_1d<double, TDim>& dest = destination[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            dest[comp] = orig[comp];
    }
}

void AssignVectorToVector(const ValuesVectorType& origin,
                          ValuesVectorType& destination
                         )
{
    int loop_size = origin.size();
    #pragma omp parallel for
    for (int i_node = 0; i_node < loop_size; i_node++)
    {
        destination[i_node] = origin[i_node];
    }
}

private:
    //number of edges
    unsigned int mNumberEdges;

    //CSR data vector for storage of the G, L and consistent M components of edge ij
    EdgesVectorType mNonzeroEdgeValues;
    //vector to store column indices of nonzero matrix elements for each row
    IndicesVectorType mColumnIndex;
    //index vector to access the start of matrix row i in the column vector
    IndicesVectorType mRowStartIndex;
    //inverse of the mass matrix ... for parallel calculation each subdomain should contain this correctly calculated (including contributions of the neighbours)
    ValuesVectorType mInvertedMassMatrix;
    //minimum height around one node
    ValuesVectorType mHmin;
    //lumped mass matrix (separately stored due to lack of diagonal elements of the consistent mass matrix)
    ValuesVectorType mLumpedMassMatrix;
    //diagonal of the gradient matrix (separately stored due to special calculations)
    CalcVectorType mDiagGradientMatrix;

    //*******************************************
    //functions to set up elemental mass matrices
    //NOTE(review): this overload is declared for a 3x3 matrix but loops up
    //to TDim - presumably a TDim-specific specialization exists elsewhere;
    //confirm against the remainder of the file (continues past this chunk).
    void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 3, 3 > & mass_consistent, double volume)
    {
        for (unsigned int i_node = 0; i_node <= TDim; i_node++)
        {
            //diagonal terms
            mass_consistent(i_node, i_node) = 0.16666666666666666667 * volume; //1/6
            //non-diagonal terms
            double temp = 0.08333333333333333333 * volume; // 1/12
            for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
            {
                //taking advantage of symmetry
                mass_consistent(i_node, j_neighbour) = temp;
mass_consistent(j_neighbour, i_node) = temp; } } } void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 4, 4 > & mass_consistent, double volume) { for (unsigned int i_node = 0; i_node <= TDim; i_node++) { //diagonal terms mass_consistent(i_node, i_node) = 0.1 * volume; //non-diagonal terms double temp = 0.05 * volume; for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++) { //taking advantage of symmetry mass_consistent(i_node, j_neighbour) = temp; mass_consistent(j_neighbour, i_node) = temp; } } } }; } //namespace Kratos #endif //KRATOS_EDGE_DATA_H_INCLUDED defined
9.norace6.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 40 int main() { int F[N], x = 1, y = 1; #pragma omp parallel { #pragma omp single firstprivate(x, y) for (int i = 3; i < N; i++) { F[i] = x + y; y = x; x = F[i]; } } return 0; } // We do not support inter SCoP data races for now // CHECK: Region is Data Race Free. // END
section.c
/*
 * OpenMP demo: compute c = a + b and d = a .* b concurrently in two
 * worksharing sections, then print the results with a dynamically
 * scheduled loop (output line order may vary between runs).
 *
 * Fixes over the original: implicit-int "main ()" (invalid since C99,
 * a hard error in C23) replaced by "int main(void)"; explicit return 0;
 * loop indices declared in the for statements (making the private(i)
 * clauses unnecessary); dead commented-out initializations removed.
 */
#include <omp.h>
#include <stdio.h>

#define N 100
#define chunk 10

int main(void)
{
    float a[N], b[N], c[N], d[N];

    /* Some initializations */
    for (int i = 0; i < N; i++) {
        a[i] = i * 1.0;
        b[i] = i * 1.0;
    }

    #pragma omp parallel shared(a, b, c, d)
    {
        /* Each section is executed once, by some thread of the team. */
        #pragma omp sections nowait
        {
            #pragma omp section
            for (int i = 0; i < N; i++)
                c[i] = a[i] + b[i];

            #pragma omp section
            for (int i = 0; i < N; i++)
                d[i] = a[i] * b[i];
        } /* end of sections */
    } /* end of parallel section */

    #pragma omp parallel shared(a, b, c, d)
    {
        /* Iterations handed out in chunks of `chunk`; print order is
         * therefore nondeterministic across threads. */
        #pragma omp for schedule(dynamic, chunk) nowait
        for (int i = 0; i < N; i++)
            printf("c[%d] = %3.1f ; d[%d] = %4.2f \n", i, c[i], i, d[i]);
    }

    return 0;
}
core_cttmqr.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zttmqr.c, normal z -> c, Fri Sep 28 17:38:25 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

/***************************************************************************//**
 *
 * @ingroup core_ttmqr
 *
 *  Overwrites the general m1-by-n1 tile A1 and
 *  m2-by-n2 tile A2 with
 *
 *                                side = PlasmaLeft        side = PlasmaRight
 *    trans = PlasmaNoTrans            Q * | A1 |           | A1 A2 | * Q
 *                                         | A2 |
 *
 *    trans = Plasma_ConjTrans       Q^H * | A1 |           | A1 A2 | * Q^H
 *                                         | A2 |
 *
 *  where Q is a complex unitary matrix defined as the product of k
 *  elementary reflectors
 *
 *    Q = H(1) H(2) . . . H(k)
 *
 *  as returned by plasma_core_cttqrt.
 *
 *******************************************************************************
 *
 * @param[in] side
 *         - PlasmaLeft  : apply Q or Q^H from the Left;
 *         - PlasmaRight : apply Q or Q^H from the Right.
 *
 * @param[in] trans
 *         - PlasmaNoTrans    : Apply Q;
 *         - Plasma_ConjTrans : Apply Q^H.
 *
 * @param[in] m1
 *         The number of rows of the tile A1. m1 >= 0.
 *
 * @param[in] n1
 *         The number of columns of the tile A1. n1 >= 0.
 *
 * @param[in] m2
 *         The number of rows of the tile A2. m2 >= 0.
 *         m2 = m1 if side == PlasmaRight.
 *
 * @param[in] n2
 *         The number of columns of the tile A2. n2 >= 0.
 *         n2 = n1 if side == PlasmaLeft.
 *
 * @param[in] k
 *         The number of elementary reflectors whose product defines
 *         the matrix Q.
 *
 * @param[in] ib
 *         The inner-blocking size. ib >= 0.
 *
 * @param[in,out] A1
 *         On entry, the m1-by-n1 tile A1.
 *         On exit, A1 is overwritten by the application of Q.
 *
 * @param[in] lda1
 *         The leading dimension of the array A1. lda1 >= max(1,m1).
 *
 * @param[in,out] A2
 *         On entry, the m2-by-n2 tile A2.
 *         On exit, A2 is overwritten by the application of Q.
 *
 * @param[in] lda2
 *         The leading dimension of the tile A2. lda2 >= max(1,m2).
 *
 * @param[in] V
 *         The i-th row must contain the vector which defines the
 *         elementary reflector H(i), for i = 1,2,...,k, as returned by
 *         plasma_core_cttqrt in the first k columns of its array argument V.
 *
 * @param[in] ldv
 *         The leading dimension of the array V. ldv >= max(1,k).
 *
 * @param[in] T
 *         The ib-by-k triangular factor T of the block reflector.
 *         T is upper triangular by block (economic storage);
 *         The rest of the array is not referenced.
 *
 * @param[in] ldt
 *         The leading dimension of the array T. ldt >= ib.
 *
 * @param work
 *         Auxiliary workspace array of length
 *         ldwork-by-n1 if side == PlasmaLeft
 *         ldwork-by-ib if side == PlasmaRight
 *
 * @param[in] ldwork
 *         The leading dimension of the array work.
 *             ldwork >= max(1,ib) if side == PlasmaLeft
 *             ldwork >= max(1,m1) if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
// Declared weak so that a tuned/vendor implementation can override it at link
// time without changing callers.
__attribute__((weak))
int plasma_core_cttmqr(plasma_enum_t side, plasma_enum_t trans,
                       int m1, int n1, int m2, int n2, int k, int ib,
                       plasma_complex32_t *A1, int lda1,
                       plasma_complex32_t *A2, int lda2,
                       const plasma_complex32_t *V, int ldv,
                       const plasma_complex32_t *T, int ldt,
                       plasma_complex32_t *work, int ldwork)
{
    // Check input arguments.
    // LAPACK-style convention: return -i when the i-th argument is illegal.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_coreblas_error("illegal value of side");
        return -1;
    }
    if ((trans != PlasmaNoTrans) && (trans != Plasma_ConjTrans)) {
        plasma_coreblas_error("illegal value of trans");
        return -2;
    }
    if (m1 < 0) {
        plasma_coreblas_error("illegal value of m1");
        return -3;
    }
    if (n1 < 0) {
        plasma_coreblas_error("illegal value of n1");
        return -4;
    }
    if ((m2 < 0) || ((m2 != m1) && (side == PlasmaRight))) {
        plasma_coreblas_error("illegal value of m2");
        return -5;
    }
    if ((n2 < 0) || ((n2 != n1) && (side == PlasmaLeft))) {
        plasma_coreblas_error("illegal value of n2");
        return -6;
    }
    if ((k < 0) ||
        ((side == PlasmaLeft)  && (k > m1)) ||
        ((side == PlasmaRight) && (k > n1))) {
        plasma_coreblas_error("illegal value of k");
        return -7;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -8;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -9;
    }
    if (lda1 < imax(1, m1)) {
        plasma_coreblas_error("illegal value of lda1");
        return -10;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -11;
    }
    if (lda2 < imax(1, m2)) {
        plasma_coreblas_error("illegal value of lda2");
        return -12;
    }
    if (V == NULL) {
        plasma_coreblas_error("NULL V");
        return -13;
    }
    if (ldv < imax(1, side == PlasmaLeft ? m2 : n2)) {
        plasma_coreblas_error("illegal value of ldv");
        return -14;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -15;
    }
    if (ldt < imax(1,ib)) {
        plasma_coreblas_error("illegal value of ldt");
        return -16;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -17;
    }
    if (ldwork < imax(1, side == PlasmaLeft ? ib : m1)) {
        plasma_coreblas_error("Illegal value of ldwork");
        return -18;
    }

    // quick return
    if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
        return PlasmaSuccess;

    // Choose the sweep direction over the ib-wide reflector blocks:
    // i1 is the starting block offset, i3 the (signed) stride. Forward
    // (0, +ib) or backward (last block, -ib) depending on side/trans, so
    // the reflectors compose in the correct order.
    int i1, i3;
    if ((side == PlasmaLeft  && trans != PlasmaNoTrans) ||
        (side == PlasmaRight && trans == PlasmaNoTrans)) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i);  // width of this reflector block (last may be short)
        int ic = 0;
        int jc = 0;
        int mi  = m1;
        int ni  = n1;
        int mi2 = m2;
        int ni2 = n2;
        int l   = 0;

        if (side == PlasmaLeft) {
            // H or H^H is applied to C(i:m,1:n).
            mi = kb; //m1 - i;
            mi2 = imin(i+kb, m2);
            ic = i;
            l = imin(kb, imax(0, m2-i));
        }
        else {
            ni = kb;
            ni2 = imin(i+kb, n2);
            jc = i;
            l = imin(kb, imax(0, n2-i));
        }

        // Apply H or H^H (NOTE: plasma_core_cparfb used to be core_zttrfb).
        plasma_core_cparfb(side, trans,
                           PlasmaForward, PlasmaColumnwise,
                           mi, ni, mi2, ni2, kb, l,
                           &A1[lda1*jc+ic], lda1,
                           A2, lda2,
                           &V[ldv*i], ldv,
                           &T[ldt*i], ldt,
                           work, ldwork);
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper: records A1/A2 as inout and V/T as in dependences so
// the runtime orders this kernel against other tile tasks, then runs the
// sequential kernel with a per-thread workspace.
void plasma_core_omp_cttmqr(plasma_enum_t side, plasma_enum_t trans,
                            int m1, int n1, int m2, int n2, int k, int ib,
                            plasma_complex32_t *A1, int lda1,
                            plasma_complex32_t *A2, int lda2,
                            const plasma_complex32_t *V, int ldv,
                            const plasma_complex32_t *T, int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*n1]) \
                     depend(inout:A2[0:lda2*n2]) \
                     depend(in:V[0:ldv*k]) \
                     depend(in:T[0:ib*k])
    {
        // Skip the work (but keep the dependence bookkeeping) once the
        // sequence has already failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            plasma_complex32_t *W = (plasma_complex32_t*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? ib : m1; // TODO: float check

            // Call the kernel.
            int info = plasma_core_cttmqr(side, trans,
                                          m1, n1, m2, n2, k, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          V, ldv,
                                          T, ldt,
                                          W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_cttmqr() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *composite_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *composite_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the destination image, modified by he composition % % o channel: the channel. % % o compose: This operator affects how the composite is applied to % the image. 
The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o composite_image: the composite (source) image. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'composite_image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o "compose:outside-overlay" % Modify how the composition is to effect areas not directly covered % by the 'composite_image' at the offset given. Normally this is % dependant on the 'compose' method, especially Duff-Porter methods. % % If set to "false" then disable all normal handling of pixels not % covered by the composite_image. Typically used for repeated tiling % of the composite_image by the calling API. % % Previous to IM v6.5.3-3 this was called "modify-outside-overlay" % */ static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } /* ** Programmers notes on SVG specification. ** ** A Composition is defined by... ** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors ** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc) ** Y = 1 for source preserved ** Z = 1 for destination preserved ** ** Conversion to transparency (then optimized) ** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) ** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) ** ** Where... ** Sca = Sc*Sa normalized Source color divided by Source alpha ** Dca = Dc*Da normalized Dest color divided by Dest alpha ** Dc' = Dca'/Da' the desired color value for this channel. 
** ** Da' in in the follow formula as 'gamma' The resulting alpla value. ** ** ** Most functions use a blending mode of over (X=1,Y=1,Z=1) ** this results in the following optimizations... ** gamma = Sa+Da-Sa*Da; ** gamma = 1 - QuantiumScale*alpha * QuantiumScale*beta; ** opacity = QuantiumScale*alpha*beta; // over blend, optimized 1-Gamma ** ** The above SVG definitions also definate that Mathematical Composition ** methods should use a 'Over' blending mode for Alpha Channel. ** It however was not applied for composition modes of 'Plus', 'Minus', ** the modulus versions of 'Add' and 'Subtract'. ** ** ** Mathematical operator changes to be applied from IM v6.7... ** ** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed ** 'ModulusAdd' and 'ModulusSubtract' for clarity. ** ** 2/ All mathematical compositions work as per the SVG specification ** with regard to blending. This now includes 'ModulusAdd' and ** 'ModulusSubtract'. ** ** 3/ When the special channel flag 'sync' (syncronize channel updates) ** is turned off (enabled by default) then mathematical compositions are ** only performed on the channels specified, and are applied ** independantally of each other. In other words the mathematics is ** performed as 'pure' mathematical operations, rather than as image ** operations. 
*/ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa,const MagickRealType q, const MagickRealType magick_unused(Da)) { return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */ composite->red=Atop(p->red,Sa,q->red,1.0); composite->green=Atop(p->green,Sa,q->green,1.0); composite->blue=Atop(p->blue,Sa,q->blue,1.0); if (q->colorspace == CMYKColorspace) composite->index=Atop(p->index,Sa,q->index,1.0); } /* What is this Composition method for? Can't find any specification! WARNING this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType intensity; intensity=MagickPixelIntensity(p); composite->red=QuantumScale*intensity*q->red; composite->green=QuantumScale*intensity*q->green; composite->blue=QuantumScale*intensity*q->blue; composite->opacity=(MagickRealType) QuantumScale*intensity* p->opacity; if (q->colorspace == CMYKColorspace) composite->index=QuantumScale*intensity*q->index; } static inline void CompositeClear(const MagickPixelPacket *q, MagickPixelPacket *composite) { composite->opacity=(MagickRealType) TransparentOpacity; composite->red=0.0; composite->green=0.0; composite->blue=0.0; if (q->colorspace == CMYKColorspace) composite->index=0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { #if 0 /* Oct 2004 SVG specification. */ if (Sca*Da + Dca*Sa <= Sa*Da) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*(Sca*Da+Dca*Sa-Sa*Da)/Sca + Sca*(1.0-Da) + Dca*(1.0-Sa)); #else /* March 2009 SVG specification. 
*/ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) return(Sa*Da+Dca*(1.0-Sa)); if (Sca < MagickEpsilon) return(Dca*(1.0-Sa)); return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*Sa/Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma); composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { #if 0 /* Oct 2004 SVG specification. */ if ((Sca*Da+Dca*Sa) >= Sa*Da) return( Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) ); return( Dca*Sa*Sa/(Sa-Sca) + Sca*(1.0-Da) + Dca*(1.0-Sa) ); #endif #if 0 /* New specification, March 2009 SVG specification. This specification was also wrong of non-overlap cases. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca))); #endif /* Working from first principles using the original formula: f(Sc,Dc) = Dc/(1-Sc) This works correctly! Looks like the 2004 model was right but just required a extra condition for correct handling. 
*/ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeColorDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma); composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p < q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ MagickRealType gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 
1.0 : gamma); composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMax(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMin(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMin(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMin(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMin(p->index,q->index); } } static inline void CompositeDarkenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use intensity only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? 
p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q)); } static inline void CompositeDifference(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma); /* Values are not normalized as an optimization. */ composite->red=gamma*Difference(p->red,Sa,q->red,Da); composite->green=gamma*Difference(p->green,Sa,q->green,Da); composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Difference(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-fabs(p->opacity - q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=fabs(p->red - q->red); if ( (channel & GreenChannel) != 0 ) composite->green=fabs(p->green - q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=fabs(p->blue - q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=fabs(p->index - q->index); } } static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { /* Divide Source by Destination f(Sc,Dc) = Sc / Dc But with appropriate handling for special case of Dc == 0 specifically so that f(Black,Black)=Black and f(non-Black,Black)=White. 
It is however also important to correctly do 'over' alpha blending which is why the formula becomes so complex. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Dca) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeDivide(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma); composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const 
MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Exclusion(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Exclusion(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Exclusion(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Exclusion(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); 
  return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

/* Apply the HardLight blend to all channels with SVG 'over' blending. */
static inline void CompositeHardLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Convert an RGB triplet to HSB (hue, saturation, brightness); results are
  written through the supplied pointers, each normalized to [0,1).
*/
static void CompositeHSB(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,double *hue,double *saturation,double *brightness)
{
  MagickRealType
    delta,
    max,
    min;

  /*
    Convert RGB to HSB colorspace.
  */
  assert(hue != (double *) NULL);
  assert(saturation != (double *) NULL);
  assert(brightness != (double *) NULL);
  max=(red > green ? red : green);
  if (blue > max)
    max=blue;
  min=(red < green ?
red : green);
  if (blue < min)
    min=blue;
  *hue=0.0;
  *saturation=0.0;
  *brightness=(double) (QuantumScale*max);
  if (max == 0.0)
    return;  /* pure black: hue/saturation stay 0 */
  *saturation=(double) (1.0-min/max);
  delta=max-min;
  if (delta == 0.0)
    return;  /* achromatic: hue stays 0 */
  if (red == max)
    *hue=(double) ((green-blue)/delta);
  else
    if (green == max)
      *hue=(double) (2.0+(blue-red)/delta);
    else
      if (blue == max)
        *hue=(double) (4.0+(red-green)/delta);
  *hue/=6.0;
  if (*hue < 0.0)
    *hue+=1.0;
}

/* In: keep the source only where the destination is opaque. */
static inline MagickRealType In(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  return(Sa*p*Da);
}

static inline void CompositeIn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    gamma,
    Sa,
    Da;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*Da;
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*In(p->red,Sa,q->red,Da);
  composite->green=gamma*In(p->green,Sa,q->green,Da);
  composite->blue=gamma*In(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*In(p->index,Sa,q->index,Da);
}

/* Lighten: alpha-blend with the brighter value on top. */
static inline MagickRealType Lighten(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  if (p > q)
    return(MagickOver_(p,alpha,q,beta));  /* src-over */
  return(MagickOver_(q,beta,p,alpha));    /* dst-over */
}

static inline void CompositeLighten(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Lighten is also equivalent to a 'Maximum' method OR a greyscale
    version of a binary 'And' OR the 'Union' of pixel sets.
  */
  MagickRealType
    gamma;

  if ( (channel & SyncChannels) != 0 )
    {
      composite->opacity=QuantumScale*p->opacity*q->opacity;  /* Over Blend */
      gamma=1.0-QuantumScale*composite->opacity;
      gamma=1.0/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
      composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity);
      composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity);
      composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=MagickMin(p->opacity,q->opacity);
      if ( (channel & RedChannel) != 0 )
        composite->red=MagickMax(p->red,q->red);
      if ( (channel & GreenChannel) != 0 )
        composite->green=MagickMax(p->green,q->green);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=MagickMax(p->blue,q->blue);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=MagickMax(p->index,q->index);
    }
}

static inline void CompositeLightenIntensity(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Select the pixel based on the intensity level.  If 'Sync' flag, select
    the whole pixel based on alpha-weighted intensity.  Otherwise use
    Intensity only, but restrict the copy according to channel.
  */
  if ( (channel & SyncChannels) != 0 )
    {
      MagickRealType
        Da,
        Sa;

      Sa=1.0-QuantumScale*p->opacity;
      Da=1.0-QuantumScale*q->opacity;
      *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q))
        ? *p : *q;
    }
  else
    {
      int
        from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q));

      if ( (channel & AlphaChannel) != 0 )
        composite->opacity = from_p ? p->opacity : q->opacity;
      if ( (channel & RedChannel) != 0 )
        composite->red = from_p ? p->red : q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green = from_p ? p->green : q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue = from_p ? p->blue : q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index = from_p ?
p->index : q->index;
    }
}

#if 0
static inline MagickRealType LinearDodge(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearDodge: simplifies to a trivial formula
      f(Sc,Dc) = Sc + Dc
      Dca' = Sca + Dca
  */
  return(Sca+Dca);
}
#endif

/* Sum the alpha-weighted channels of source and destination (Sca+Dca). */
static inline void CompositeLinearDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*(p->red*Sa+q->red*Da);
  composite->green=gamma*(p->green*Sa+q->green*Da);
  composite->blue=gamma*(p->blue*Sa+q->blue*Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*(p->index*Sa+q->index*Da);
}

static inline MagickRealType LinearBurn(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    LinearBurn: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

      f(Sc,Dc) = Sc + Dc - 1
  */
  return(Sca+Dca-Sa*Da);
}

static inline void CompositeLinearBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
  composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType LinearLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Previous formula, was only valid for fully-opaque images.
  */
  return(Dca+2*Sca-1.0);
#else
  /*
    LinearLight: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:

      f(Sc,Dc) = Dc + 2*Sc - 1
  */
  return((Sca-Sa)*Da+Sca+Dca);
#endif
}

static inline void CompositeLinearLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Mathematics(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
  const GeometryInfo *geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is
    defined as...
       f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D

    Where the arguments A,B,C,D are (currently) passed to composite as a
    comma separated 'geometry' string in the "compose:args" image artifact.

       A = a->rho,   B = a->sigma,   C = a->xi,   D = a->psi

    Applying the SVG transparency formula (see above), we get...

       Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)

       Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
              Dca*(1.0-Sa)
  */
  return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
    geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositeMathematics(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  const GeometryInfo *args, MagickPixelPacket *composite)
{
  MagickRealType
    Sa,
    Da,
    gamma;

  Sa=1.0-QuantumScale*p->opacity;  /* ??? - AT */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
      composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
        q->red*Da,Da,args);
      composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,
        QuantumScale*q->green*Da,Da,args);
      composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
        q->blue*Da,Da,args);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,
          QuantumScale*q->index*Da,Da,args);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumRange*
          Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumRange*
          Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumRange*
          Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumRange*
          Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
    }
}

static inline void CompositePlus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 )
    {
      /*
        NOTE: "Plus" does not use 'over' alpha-blending but uses a special
        'plus' form of alpha-blending.  It is the ONLY mathematical operator
        to do this.  This is what makes it different to the otherwise
        equivalent "LinearDodge" composition method.

        Note however that color channels are still affected by the alpha
        channel as a result of the blending, making it just as useless for
        independent channel maths, just like all other mathematical
        composition methods.

        As such the removal of the 'sync' flag, is still a useful
        convention.
        The MagickPixelCompositePlus() function is defined in
        "composite-private.h" so it can also be used for Image Blending.
      */
      MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=p->opacity+q->opacity-QuantumRange;
      if ( (channel & RedChannel) != 0 )
        composite->red=p->red+q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green=p->green+q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue=p->blue+q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=p->index+q->index;
    }
}

static inline MagickRealType Minus(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,
  const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination

      f(Sc,Dc) = Sc - Dc
  */
  return(Sca + Dca - 2*Dca*Sa);
}

static inline void CompositeMinus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Sa,
    Da,
    gamma;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      gamma=1.0/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
      composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
      composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
      composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-(Sa-Da));
      if ( (channel & RedChannel) != 0 )
        composite->red=p->red-q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green=p->green-q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue=p->blue-q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=p->index-q->index;
    }
}

/* Add source and destination, wrapping values that exceed QuantumRange. */
static inline MagickRealType ModulusAdd(const MagickRealType p,
  const MagickRealType Sa, const MagickRealType q, const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p+q;
  if (pixel > QuantumRange)
    pixel-=(QuantumRange+1.0);
  return(pixel*Sa*Da + p*Sa*(1-Da) + q*Da*(1-Sa));
}

static inline void CompositeModulusAdd(const MagickPixelPacket *p,
  const MagickPixelPacket *q, const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 )
    {
      MagickRealType
        Sa,
        Da,
        gamma;

      Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
      Da=1.0-QuantumScale*q->opacity;
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      gamma=1.0/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
      composite->red=ModulusAdd(p->red,Sa,q->red,Da);
      composite->green=ModulusAdd(p->green,Sa,q->green,Da);
      composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=ModulusAdd(p->index,Sa,q->index,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
          1.0,QuantumRange-q->opacity,1.0);
      if ( (channel & RedChannel) != 0 )
        composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
      if ( (channel & GreenChannel) != 0 )
        composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
    }
}

/* Subtract destination from source, wrapping negative values around. */
static inline MagickRealType ModulusSubtract(const MagickRealType p,
  const MagickRealType Sa, const MagickRealType q, const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p-q;
  if (pixel < 0.0)
    pixel+=(QuantumRange+1.0);
  return(pixel*Sa*Da + p*Sa*(1-Da) + q*Da*(1-Sa));
}

static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q, const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 )
    {
      MagickRealType
        Sa,
        Da,
        gamma;

      Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
      Da=1.0-QuantumScale*q->opacity;
      gamma = RoundToUnity(Sa+Da-Sa*Da);
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      gamma=1.0/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
      composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
      composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
      composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
          1.0,QuantumRange-q->opacity,1.0);
      if ( (channel & RedChannel) != 0 )
        composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
      if ( (channel & GreenChannel) != 0 )
        composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
    }
}

/* Multiply: Sca*Dca with the SVG 'over' blending terms. */
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
      composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
        q->red*Da,Da);
      composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
        q->green*Da,Da);
      composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
        q->blue*Da,Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,
          QuantumScale*q->index*Da,Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Sa*Da);
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumScale*p->red*q->red;
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumScale*p->green*q->green;
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumScale*p->blue*q->blue;
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumScale*p->index*q->index;
    }
}

/* Out: keep the source only where the destination is transparent. */
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  return(Sa*p*(1.0-Da));
}

static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Sa,
    Da,
    gamma;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}

static MagickRealType PegtopLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light alternative: A continuous version of the
    Softlight function, producing very similar results.
       f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);  /* avoid division by zero destination alpha */
  return(Dca*Dca*(Sa-2*Sca)/Da+Sca*(2*Dca+1-Da)+Dca*(1-Sa));
}

static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

       f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen:  A negated multiply

      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Sa,
    Da,
    gamma;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      Sa*=QuantumScale;
      Da*=QuantumScale;  /* optimization */
      gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
      composite->red=gamma*Screen(p->red*Sa,q->red*Da);
      composite->green=gamma*Screen(p->green*Sa,q->green*Da);
      composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Screen(p->index*Sa,q->index*Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumRange*Screen(QuantumScale*p->red,
          QuantumScale*q->red);
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumRange*Screen(QuantumScale*p->green,
          QuantumScale*q->green);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
          QuantumScale*q->blue);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumRange*Screen(QuantumScale*p->index,
          QuantumScale*q->index);
    }
}

static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
#if 0
  /*
    Oct 2004 SVG specification -- was found to be incorrect
    See  http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html.
  */
  if (2.0*Sca < Sa)
    return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (8.0*Dca <= Da)
    return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa)*(3.0-8.0*Dca/Da))+
      Sca*(1.0-Da)+Dca*(1.0-Sa));
  return((Dca*Sa+(pow(Dca/Da,0.5)*Da-Dca)*(2.0*Sca-Sa))+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
#else
  MagickRealType
    alpha,
    beta;

  /*
    New specification:  March 2009 SVG specification.
  */
  alpha=Dca/Da;
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
  return(beta);
#endif
}

static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated.

  Multiply difference by amount, if difference larger than threshold???
  What use this is is completely unknown.
  The Opacity calculation appears to be inverted.  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) <= MagickEpsilon ?
1.0 : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/* Xor: keep each image only where the other is transparent. */
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1-Da)+Dca*(1-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

static void HSBComposite(const double hue,const double saturation,
  const double brightness,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    f,
    h,
    p,
    q,
    t;

  /*
    Convert HSB to RGB colorspace.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  if (saturation == 0.0)
    {
      /* achromatic: all channels equal the brightness */
      *red=(MagickRealType) QuantumRange*brightness;
      *green=(*red);
      *blue=(*red);
      return;
    }
  h=6.0*(hue-floor(hue));  /* hue sector in [0,6) */
  f=h-floor((double) h);
  p=brightness*(1.0-saturation);
  q=brightness*(1.0-saturation*f);
  t=brightness*(1.0-saturation*(1.0-f));
  switch ((int) h)
  {
    case 0:
    default:
    {
      *red=(MagickRealType) QuantumRange*brightness;
      *green=(MagickRealType) QuantumRange*t;
      *blue=(MagickRealType) QuantumRange*p;
      break;
    }
    case 1:
    {
      *red=(MagickRealType) QuantumRange*q;
      *green=(MagickRealType) QuantumRange*brightness;
      *blue=(MagickRealType) QuantumRange*p;
      break;
    }
    case 2:
    {
      *red=(MagickRealType) QuantumRange*p;
      *green=(MagickRealType) QuantumRange*brightness;
      *blue=(MagickRealType) QuantumRange*t;
      break;
    }
    case 3:
    {
      *red=(MagickRealType) QuantumRange*p;
      *green=(MagickRealType) QuantumRange*q;
      *blue=(MagickRealType) QuantumRange*brightness;
      break;
    }
    case 4:
    {
      *red=(MagickRealType) QuantumRange*t;
      *green=(MagickRealType) QuantumRange*p;
      *blue=(MagickRealType) QuantumRange*brightness;
      break;
    }
    case 5:
    {
      *red=(MagickRealType) QuantumRange*brightness;
      *green=(MagickRealType) QuantumRange*p;
      *blue=(MagickRealType) QuantumRange*q;
      break;
    }
  }
}

/* Convenience wrapper: composite using the default channel set. */
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *composite_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,composite_image,
    x_offset,y_offset);
  return(status);
}

MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite_image,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *composite_view,
    *image_view;

  const char
    *value;

  double
    sans;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *destination_image;
  MagickBooleanType
    modify_outside_overlay,
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    amount,
    destination_dissolve,
    midpoint,
    percent_brightness,
    percent_saturation,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Prepare composite image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite_image != (Image *) NULL);
  assert(composite_image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  GetMagickPixelPacket(image,&zero);
  destination_image=(Image *) NULL;
  /* Defaults; several operators override these from "compose:args" below. */
  amount=0.5;
  destination_dissolve=1.0;
  modify_outside_overlay=MagickFalse;
  percent_brightness=100.0;
  percent_saturation=100.0;
  source_dissolve=1.0;
  threshold=0.05f;
  switch (compose)
  {
    case ClearCompositeOp:
    case SrcCompositeOp:
    case InCompositeOp:
    case SrcInCompositeOp:
    case OutCompositeOp:
    case SrcOutCompositeOp:
    case DstInCompositeOp:
    case DstAtopCompositeOp:
    {
      /*
        Modify destination outside the overlaid region.
*/ modify_outside_overlay=MagickTrue; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (composite_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) composite_image->columns) >= (ssize_t) image->columns) break; if ((y_offset+(ssize_t) composite_image->rows) >= (ssize_t) image->rows) break; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); composite_view=AcquireCacheView(composite_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) composite_image->rows; y++) { MagickBooleanType sync; register const IndexPacket *composite_indexes; register const PixelPacket *p; register IndexPacket *indexes; register PixelPacket *q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns, 1,exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, composite_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } composite_indexes=GetCacheViewVirtualIndexQueue(composite_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); (void) CopyMagickMemory(q,p,composite_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (composite_indexes != (const IndexPacket *) NULL)) (void) CopyMagickMemory(indexes,composite_indexes, composite_image->columns*sizeof(*indexes)); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
composite_view=DestroyCacheView(composite_view); image_view=DestroyCacheView(image_view); return(status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* Modify destination outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); modify_outside_overlay=MagickTrue; break; } case BlurCompositeOp: { CacheView *composite_view, *destination_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ destination_image=CloneImage(image,image->columns,image->rows,MagickTrue, &image->exception); if (destination_image == (Image *) NULL) return(MagickFalse); /* Determine the horizontal and vertical maximim blur. */ SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0 ) { destination_image=DestroyImage(destination_image); return(MagickFalse); } width=geometry_info.rho; height=geometry_info.sigma; blur.x1=geometry_info.rho; blur.x2=0.0; blur.y1=0.0; blur.y2=geometry_info.sigma; angle_start=0.0; angle_range=0.0; if ((flags & HeightValue) == 0) blur.y2=blur.x1; if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Blur Image by resampling. 
*/ pixel=zero; exception=(&image->exception); resample_filter=AcquireResampleFilter(image,&image->exception); SetResampleFilter(resample_filter,CubicFilter,2.0); destination_view=AcquireCacheView(destination_image); composite_view=AcquireCacheView(composite_image); for (y=0; y < (ssize_t) composite_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *restrict p; register PixelPacket *restrict r; register IndexPacket *restrict destination_indexes; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(destination_view,0,y, destination_image->columns,1,&image->exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view); for (x=0; x < (ssize_t) composite_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetBluePixelComponent(p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } ScaleResampleFilter(resample_filter,blur.x1*QuantumScale* GetRedPixelComponent(p),blur.y1*QuantumScale* GetGreenPixelComponent(p),blur.x2*QuantumScale* GetRedPixelComponent(p),blur.y2*QuantumScale* GetGreenPixelComponent(p)); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel); SetPixelPacket(destination_image,&pixel,r,destination_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); composite_view=DestroyCacheView(composite_view); destination_view=DestroyCacheView(destination_view); composite_image=destination_image; break; } case 
DisplaceCompositeOp: case DistortCompositeOp: { CacheView *composite_view, *destination_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket *restrict destination_indexes; register PixelPacket *restrict r; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ destination_image=CloneImage(image,image->columns,image->rows,MagickTrue, &image->exception); if (destination_image == (Image *) NULL) return(MagickFalse); SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue|HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (composite_image->columns-1.0)/ 2.0; vertical_scale=(MagickRealType) (composite_image->rows-1.0)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1.0)/2.0; vertical_scale=(MagickRealType) (image->rows-1.0)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(composite_image->columns-1.0)/200.0; vertical_scale*=(composite_image->rows-1.0)/200.0; } else { horizontal_scale*=(image->columns-1.0)/200.0; vertical_scale*=(image->rows-1.0)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) == 0) center.x=(MagickRealType) x_offset+(composite_image->columns-1)/ 2.0; else center.x=((MagickRealType) image->columns-1)/2.0; else if ((flags & AspectValue) == 0) center.x=(MagickRealType) x_offset+geometry_info.xi; else center.x=geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) == 0) center.y=(MagickRealType) y_offset+(composite_image->rows-1)/2.0; else center.y=((MagickRealType) image->rows-1)/2.0; else if ((flags & AspectValue) == 0) center.y=(MagickRealType) y_offset+geometry_info.psi; else center.y=geometry_info.psi; } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ pixel=zero; exception=(&image->exception); image_view=AcquireCacheView(image); destination_view=AcquireCacheView(destination_image); composite_view=AcquireCacheView(composite_image); for (y=0; y < (ssize_t) composite_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *restrict p; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(destination_view,0,y, destination_image->columns,1,&image->exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view); for (x=0; x < (ssize_t) composite_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } /* Displace the offset. */ offset.x=(horizontal_scale*(GetRedPixelComponent(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(vertical_scale*(GetGreenPixelComponent(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); (void) InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale* pixel.opacity)*(1.0-QuantumScale*GetOpacityPixelComponent(p))); SetPixelPacket(destination_image,&pixel,r,destination_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) break; } destination_view=DestroyCacheView(destination_view); composite_view=DestroyCacheView(composite_view); image_view=DestroyCacheView(image_view); composite_image=destination_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; destination_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { destination_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) destination_dissolve=geometry_info.sigma/100.0; if ((destination_dissolve-MagickEpsilon) < 0.0) destination_dissolve=0.0; modify_outside_overlay=MagickTrue; if ((destination_dissolve+MagickEpsilon) > 1.0 ) { destination_dissolve=1.0; modify_outside_overlay=MagickFalse; } } break; } case BlendCompositeOp: { value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; destination_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) destination_dissolve=geometry_info.sigma/100.0; modify_outside_overlay=MagickTrue; 
if ((destination_dissolve+MagickEpsilon) > 1.0) modify_outside_overlay=MagickFalse; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the brightness and saturation scale. */ value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_brightness=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_saturation=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. This Composition method is depreciated */ value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } value=GetImageArtifact(composite_image,"compose:outside-overlay"); if (value != (const char *) NULL) modify_outside_overlay=IsMagickTrue(value); /* Composite image. 
*/ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; GetMagickPixelPacket(composite_image,&zero); exception=(&image->exception); image_view=AcquireCacheView(image); composite_view=AcquireCacheView(composite_image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *pixels; double brightness, hue, saturation; MagickPixelPacket composite, destination, source; register const IndexPacket *restrict composite_indexes; register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; if (modify_outside_overlay == MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) composite_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(PixelPacket *) NULL; p=(PixelPacket *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) composite_image->rows)) { p=GetCacheViewVirtualPixels(composite_view,0,y-y_offset, composite_image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset; } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); composite_indexes=GetCacheViewVirtualIndexQueue(composite_view); source=zero; destination=zero; hue=0.0; saturation=0.0; brightness=0.0; for (x=0; x < (ssize_t) image->columns; x++) { if (modify_outside_overlay == MagickFalse) { if (x < x_offset) { q++; continue; } if ((x-x_offset) >= (ssize_t) composite_image->columns) break; } destination.red=(MagickRealType) GetRedPixelComponent(q); destination.green=(MagickRealType) GetGreenPixelComponent(q); destination.blue=(MagickRealType) GetBluePixelComponent(q); if (image->matte != 
MagickFalse) destination.opacity=(MagickRealType) GetOpacityPixelComponent(q); if (image->colorspace == CMYKColorspace) destination.index=(MagickRealType) GetIndexPixelComponent(indexes+x); if (image->colorspace == CMYKColorspace) { destination.red=(MagickRealType) QuantumRange-destination.red; destination.green=(MagickRealType) QuantumRange-destination.green; destination.blue=(MagickRealType) QuantumRange-destination.blue; destination.index=(MagickRealType) QuantumRange-destination.index; } /* Handle destination modifications outside overlaid region. */ composite=destination; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) composite_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity=(MagickRealType) (QuantumRange- destination_dissolve*(QuantumRange-composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&destination,&composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity=(MagickRealType) TransparentOpacity; break; } default: { (void) GetOneVirtualMagickPixel(composite_image,x-x_offset, y-y_offset,&composite,exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetRedPixelComponent(q,ClampToQuantum(composite.red)); SetGreenPixelComponent(q,ClampToQuantum(composite.green)); SetBluePixelComponent(q,ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetOpacityPixelComponent(q,ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) 
SetIndexPixelComponent(indexes+x,ClampToQuantum(composite.index)); q++; continue; } /* Handle normal overlay of source onto destination. */ source.red=(MagickRealType) GetRedPixelComponent(p); source.green=(MagickRealType) GetGreenPixelComponent(p); source.blue=(MagickRealType) GetBluePixelComponent(p); if (composite_image->matte != MagickFalse) source.opacity=(MagickRealType) GetOpacityPixelComponent(p); if (composite_image->colorspace == CMYKColorspace) source.index=(MagickRealType) GetIndexPixelComponent(composite_indexes+ x-x_offset); if (composite_image->colorspace == CMYKColorspace) { source.red=(MagickRealType) QuantumRange-source.red; source.green=(MagickRealType) QuantumRange-source.green; source.blue=(MagickRealType) QuantumRange-source.blue; source.index=(MagickRealType) QuantumRange-source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&destination,&composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite=source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source,source.opacity,&destination, destination.opacity,&composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&destination,destination.opacity,&source, source.opacity,&composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source,&destination,&composite); break; } case DstInCompositeOp: { CompositeIn(&destination,&source,&composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source,&destination,&composite); break; } case DstOutCompositeOp: { CompositeOut(&destination,&source,&composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source,&destination,&composite); break; } case DstAtopCompositeOp: { CompositeAtop(&destination,&source,&composite); break; } case XorCompositeOp: { 
CompositeXor(&source,&destination,&composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source,&destination,channel,&composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source,&destination,channel,&composite); break; } case MinusSrcCompositeOp: { CompositeMinus(&destination,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&destination,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&destination,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&destination,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&destination,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&destination,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&destination,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&destination,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&destination,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&destination,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&destination,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&destination,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&destination,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&destination,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&destination,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&destination,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&destination,&composite); break; } 
case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&destination,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&destination,&composite); break; } case OverlayCompositeOp: { /* Overlay = Reversed HardLight. */ CompositeHardLight(&destination,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&destination,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&destination,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&destination,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&destination,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&destination,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&destination) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&destination,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&destination, (MagickRealType) (QuantumRange-destination_dissolve*(QuantumRange- destination.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&destination, destination_dissolve,&composite); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&destination,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHSB(destination.red,destination.green,destination.blue,&hue, &saturation,&brightness); 
brightness+=(0.01*percent_brightness*offset)/midpoint; saturation*=0.01*percent_saturation; HSBComposite(hue,saturation,brightness,&composite.red, &composite.green,&composite.blue); break; } case HueCompositeOp: { if (source.opacity == TransparentOpacity) break; if (destination.opacity == TransparentOpacity) { composite=source; break; } CompositeHSB(destination.red,destination.green,destination.blue,&hue, &saturation,&brightness); CompositeHSB(source.red,source.green,source.blue,&hue,&sans,&sans); HSBComposite(hue,saturation,brightness,&composite.red, &composite.green,&composite.blue); if (source.opacity < destination.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (destination.opacity == TransparentOpacity) { composite=source; break; } CompositeHSB(destination.red,destination.green,destination.blue,&hue, &saturation,&brightness); CompositeHSB(source.red,source.green,source.blue,&sans,&saturation, &sans); HSBComposite(hue,saturation,brightness,&composite.red, &composite.green,&composite.blue); if (source.opacity < destination.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (destination.opacity == TransparentOpacity) { composite=source; break; } CompositeHSB(destination.red,destination.green,destination.blue,&hue, &saturation,&brightness); CompositeHSB(source.red,source.green,source.blue,&sans,&sans, &brightness); HSBComposite(hue,saturation,brightness,&composite.red, &composite.green,&composite.blue); if (source.opacity < destination.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (destination.opacity == TransparentOpacity) { composite=source; break; } CompositeHSB(destination.red,destination.green,destination.blue,&sans, &sans,&brightness); CompositeHSB(source.red,source.green,source.blue,&hue,&saturation, &sans); 
HSBComposite(hue,saturation,brightness,&composite.red, &composite.green,&composite.blue); if (source.opacity < destination.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue=source.blue; break; } case CopyOpacityCompositeOp: { if (source.matte == MagickFalse) { composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); break; } composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetRedPixelComponent(q,ClampToQuantum(composite.red)); SetGreenPixelComponent(q,ClampToQuantum(composite.green)); SetBluePixelComponent(q,ClampToQuantum(composite.blue)); SetOpacityPixelComponent(q,ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetIndexPixelComponent(indexes+x,ClampToQuantum(composite.index)); p++; if (p >= (pixels+composite_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif 
proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } composite_view=DestroyCacheView(composite_view); image_view=DestroyCacheView(image_view); if (destination_image != (Image * ) NULL) destination_image=DestroyImage(destination_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture) % % A description of each parameter follows: % % o image: the image. % % o texture: This image is the texture to layer on the background. % */ MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture) { #define TextureImageTag "Texture/Image" CacheView *image_view, *texture_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (texture == (const Image *) NULL) return(MagickFalse); (void) SetImageVirtualPixelMethod(texture,TileVirtualPixelMethod); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; if ((image->compose != CopyCompositeOp) && ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) || (texture->matte != MagickFalse))) { /* Tile texture onto the image background. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) omp_throttle(1) #endif for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture->rows) { register ssize_t x; if (status == MagickFalse) continue; for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture->columns) { MagickBooleanType thread_status; thread_status=CompositeImage(image,image->compose,texture,x+ texture->tile_offset.x,y+texture->tile_offset.y); if (thread_status == MagickFalse) { status=thread_status; break; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TextureImage) #endif proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType) image->rows,image->rows); return(status); } /* Tile texture onto the image background (optimized). 
*/ status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); texture_view=AcquireCacheView(texture); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) omp_throttle(1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket *texture_indexes; register const PixelPacket *p; register IndexPacket *indexes; register ssize_t x; register PixelPacket *q; size_t width; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(texture_view,texture->tile_offset.x,(y+ texture->tile_offset.y) % texture->rows,texture->columns,1,exception); q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } texture_indexes=GetCacheViewVirtualIndexQueue(texture_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture->columns) { width=texture->columns; if ((x+(ssize_t) width) > (ssize_t) image->columns) width=image->columns-x; (void) CopyMagickMemory(q,p,width*sizeof(*p)); if ((image->colorspace == CMYKColorspace) && (texture->colorspace == CMYKColorspace)) { (void) CopyMagickMemory(indexes,texture_indexes,width* sizeof(*indexes)); indexes+=width; } q+=width; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TextureImage) #endif proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } texture_view=DestroyCacheView(texture_view); image_view=DestroyCacheView(image_view); return(status); }
/* PixelBasedRenderFunc.h */
#include"..\Core\SanTypes.h" #include"SanGraphics.h" namespace San { namespace Graphics { #ifndef __SANPIXELBASEDRENDERFUNC_H__ #define __SANPIXELBASEDRENDERFUNC_H__ struct PIXCAMERA { SVECTOR3 Coord; SVECTOR3 LookAt; SVECTOR3 Dir; SVECTOR3 UpDir; sfloat Cutoff; sfloat Near; sfloat Far; SVECTOR3 ScreenVec[4]; }; void ClearScreenRGBA(sfloat* p_buffer, const uint32 width, const uint32 height, const SANCOLORF &Color); void DrawImageRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, sfloat* p_img, const uint32 img_width, const uint32 img_height, const SVECTOR3 &pos, bool bUseAlpha = false); void DrawCrossRGBA(sfloat* p_buffer, const uint32 width, uint32 height, const SVECTOR3 pos, const SANCOLORF color, const uint32 size,const uint32 thickness); void DrawDotRGBA(sfloat* p_buffer, const uint32 width, uint32 height, const SVECTOR3 pos, const SANCOLORF color, sfloat radius); void DrawQuadrRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const uint32 quadr_width, const uint32 quadr_height, const SANCOLORF color, const SVECTOR3 &pos, const uint32 size); void DrawLineRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const SVECTOR3 pos_begin, const SVECTOR3 pos_end, const SANCOLORF color, const uint32 size); void UpdateCamera(PIXCAMERA &Camera, uint32 ScreenWidth, uint32 ScreenHeight); SVECTOR3 GetScreenCoord(const SVECTOR3 Coord, const PIXCAMERA Camera, uint32 ScreenWidth, uint32 ScreenHeight); sfloat GetHitPoint(const SVECTOR3* pRayCoord, const SVECTOR3* pRayDir, const SVECTOR3* pVec1, const SVECTOR3* pVec2, const SVECTOR3* pVec3, const SVECTOR3* pNormal, SVECTOR3* pHitPoint); SVECTOR3 Interpolation(const SVECTOR3* pVecSrc, const SVECTOR3* pVec1, const SVECTOR3* pVec2, const SVECTOR3* pVec3); void DrawDot3DRGBA(sfloat* p_buffer, const uint32 width, uint32 height, const SVECTOR3 pos, const SANCOLORF color, sfloat radius, const PIXCAMERA camera, const SVECTOR3 offset = SVECTOR3(0.0, 0.0, 0.0)); void 
DrawLine3DRGBA(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const SVECTOR3 coord_begin, const SVECTOR3 coord_end, const SANCOLORF color, const uint32 size, const PIXCAMERA camera, const SVECTOR3 offset = SVECTOR3(0.0, 0.0, 0.0)); void DrawLineGraph(sfloat* p_buffer, const uint32 buf_width, const uint32 buf_height, const uint32 dest_width, const uint32 dest_height, sfloat* p_dataset, uint32 data_size, const SVECTOR3 pos, const SANCOLORF color, const uint32 size); template<class DType, class SType> void BufferCopy(DType* pDest, SType* pSrc, uint32 BufferSize, sfloat min = 0.0f, sfloat max = 255.0f) { #pragma omp parallel for for (int32 seek = 0; seek < BufferSize; seek = seek + 1) { pDest[seek] = pSrc[seek]<min ? min : (pSrc[seek]>max ? max : pSrc[seek]); } } #endif } }
regul.h
#ifndef REGUL_H #define REGUL_H #include "linalg.h" Timer timer_global, timer_global2, timer_global3; enum regul_t { L2, L1, ELASTICNET, L1BALL, L2BALL, FUSEDLASSO, L1L2, L1LINF, NONE, L1L2_L1, INCORRECT_REG }; static bool is_regul_for_matrices(const regul_t& reg) { return reg==L1L2 || reg==L1LINF; } template <typename T> struct ParamModel { ParamModel() { regul=NONE; lambda=0; lambda2=0; lambda3=0; intercept=false; loss=SQUARE; }; loss_t loss; regul_t regul; T lambda; T lambda2; T lambda3; bool intercept; }; template <typename T> void clean_param_model(ParamModel<T>& param) { if (param.regul==FUSEDLASSO && param.lambda==0) { param.regul=ELASTICNET; param.lambda=param.lambda2; param.lambda2=param.lambda3; }; if (param.regul==ELASTICNET) { if (param.lambda==0) { param.regul=L2; param.lambda=param.lambda2; }; if (param.lambda2==0) param.regul=L1; if (param.lambda==0 && param.lambda2==0) param.regul=NONE; } else { if (param.lambda==0) param.regul=NONE; } } static regul_t regul_from_string(char* regul) { if (strcmp(regul,"l1")==0) return L1; if (strcmp(regul,"l1-ball")==0) return L1BALL; if (strcmp(regul,"fused-lasso")==0) return FUSEDLASSO; if (strcmp(regul,"l2")==0) return L2; if (strcmp(regul,"l2-ball")==0) return L2BALL; if (strcmp(regul,"elastic-net")==0) return ELASTICNET; if (strcmp(regul,"l1l2")==0) return L1L2; if (strcmp(regul,"l1l2+l1")==0) return L1L2_L1; if (strcmp(regul,"l1linf")==0) return L1LINF; if (strcmp(regul,"none")==0) return NONE; return INCORRECT_REG; } template <typename D, typename I> class Regularizer { public: typedef typename D::value_type T; typedef I index_type; Regularizer(const ParamModel<T>& model) : _intercept(model.intercept), _id(model.regul) { }; virtual ~Regularizer() { }; virtual void prox(const D& input, D& output, const T eta) const = 0; // should be able to do inplace with output=input virtual T eval(const D& input) const = 0; virtual T fenchel(D& grad1, D& grad2) const = 0; virtual void print() const = 0; virtual bool 
is_lazy() const { return false; }; virtual void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const { }; virtual bool provides_fenchel() const { return true; }; virtual regul_t id() const { return _id;}; virtual bool intercept() const { return _intercept;}; virtual T strong_convexity() const { return 0; }; virtual T lambda() const { return 0;}; protected: const bool _intercept; private: explicit Regularizer<D,I>(const Regularizer<D,I>& reg); Regularizer<D,I>& operator=(const Regularizer<D,I>& reg); const regul_t _id; }; template <typename D, typename I> class None final : public Regularizer<D,I> { public: typedef typename D::value_type T; None(const ParamModel<T>& model) : Regularizer<D,I>(model) { }; virtual void prox(const D& input, D& output, const T eta) const { output.copy(input); }; inline T eval(const D& input) const { return 0; }; inline T fenchel(D& grad1, D& grad2) const { return 0; }; bool provides_fenchel() const { return false; }; void print() const { cout << "No regularization" << endl; } }; template <typename D, typename I> class Ridge final : public Regularizer<D,I> { public: typedef typename D::value_type T; Ridge(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda) { }; inline void prox(const D& input, D& output, const T eta) const { output.copy(input); output.scal(T(1.0/(1.0+_lambda*eta))); if (this->_intercept) { const int n = input.n(); output[n-1]=input[n-1]; } }; inline T eval(const D& input) const { const int n = input.n(); const T res = input.nrm2sq(); return (this->_intercept ? T(0.5)*_lambda*(res - input[n-1]*input[n-1]) : T(0.5)*_lambda*res); }; inline T fenchel(D& grad1, D& grad2) const { return (this->_intercept & (abs<T>(grad2[grad2.n()-1]) > 1e-6)) ? INFINITY : this->eval(grad2)/(_lambda*_lambda); }; void print() const { cout << "L2 regularization" << endl; } virtual T strong_convexity() const { return this->_intercept ? 
0 : _lambda; }; virtual T lambda() const { return _lambda; }; inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const { const T scal = T(1.0)/(T(1.0)+_lambda*eta); const int p = input.n(); const int r = indices.n(); for (int jj=0; jj<r; ++jj) output[indices[jj]]=scal*input[indices[jj]]; if (this->_intercept) output[p-1]=input[p-1]; }; virtual bool is_lazy() const { return true; }; private: const T _lambda; }; template <typename D, typename I> class Lasso final : public Regularizer<D,I> { public: typedef typename D::value_type T; Lasso(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda) { }; inline void prox(const D& input, D& output, const T eta) const { input.fastSoftThrshold(output,eta*_lambda); if (this->_intercept) { const int n = input.n(); output[n-1]=input[n-1]; } }; inline T eval(const D& input) const { const int n = input.n(); const T res = input.asum(); return (this->_intercept ? _lambda*(res - abs<T>(input[n-1])) : _lambda*res); }; inline T fenchel(D& grad1, D& grad2) const { const T mm = grad2.fmaxval(); if (mm > _lambda) grad1.scal(_lambda/mm); return (this->_intercept & (abs<T>(grad2[grad2.n()-1]) > 1e-6)) ? 
INFINITY : 0; }; void print() const { cout << "L1 regularization" << endl; } virtual T lambda() const { return _lambda;}; inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const { const int p = input.n(); const int r = indices.n(); const T thrs=_lambda*eta; //#pragma omp parallel for for (int jj=0; jj<r; ++jj) output[indices[jj]]=fastSoftThrs(input[indices[jj]],thrs);; if (this->_intercept) output[p-1]=input[p-1]; }; virtual bool is_lazy() const { return true; }; private: const T _lambda; }; template <typename D, typename I> class ElasticNet final : public Regularizer<D,I> { public: typedef typename D::value_type T; // min_x 0.5|y-x|^2 + lambda_1 |x| + 0.5 lambda_2 x^2 // min_x - y x + 0.5 x^2 + lambda_1 |x| + 0.5 lambda_2 x^2 // min_x - y x + 0.5 (1+lambda_2) x^2 + lambda_1 |x| // min_x - y/(1+lambda2) x + 0.5 x^2 + lambda_1/(1+lambda2) |x| ElasticNet(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda), _lambda2(model.lambda2) { }; inline void prox(const D& input, D& output, const T eta) const { output.copy(input); output.fastSoftThrshold(_lambda*eta); output.scal(T(1.0)/(1+_lambda2*eta)); if (this->_intercept) { const int n = input.n(); output[n-1]=input[n-1]; } }; inline T eval(const D& input) const { const int n = input.n(); const T res = _lambda*input.asum() + T(0.5)*_lambda2*input.nrm2sq(); return (this->_intercept ? 
res - _lambda*abs<T>(input[n-1]) - T(0.5)*_lambda2*input[n-1]*input[n-1] : res); }; // max_x xy - lambda_1 |x| - 0.5 lambda_2 x^2 // - min_x - xy + lambda_1 |x| + 0.5 lambda_2 x^2 // -(1/lambda2) min_x - xy/lambda2 + lambda_1/lambda_2 |x| + 0.5 x^2 // x^* = prox_(l1 lambda_1/lambda_2) [ y/lambda2] // x^* = prox_(l1 lambda_1) [ y] /_lambda2 inline T fenchel(D& grad1, D& grad2) const { D tmp; tmp.copy(grad2); grad2.fastSoftThrshold(_lambda); const int n = grad2.n(); T res0 = _lambda*grad2.asum()/_lambda2 + T(0.5)*grad2.nrm2sq()/_lambda2; if (this->_intercept) res0 -= _lambda*abs<T>(grad2[n-1])/_lambda2 - T(0.5)*grad2[n-1]*grad2[n-1]/_lambda2; const T res = tmp.dot(grad2)/_lambda2 - res0; return (this->_intercept & (abs<T>(tmp[tmp.n()-1]) > 1e-6)) ? INFINITY : res; }; void print() const { cout << "Elastic Net regularization" << endl; } virtual T strong_convexity() const { return this->_intercept ? 0 : _lambda2; }; virtual T lambda() const { return _lambda;}; inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const { const int p = input.n(); const int r = indices.n(); const T thrs=_lambda*eta; const T scal = T(1.0)/(T(1.0)+_lambda2*eta); #pragma omp parallel for for (int jj=0; jj<r; ++jj) output[indices[jj]]=scal*fastSoftThrs(input[indices[jj]],thrs);; if (this->_intercept) output[p-1]=input[p-1]; }; virtual bool is_lazy() const { return true; }; private: const T _lambda; const T _lambda2; }; template <typename D, typename I> class L1Ball final : public Regularizer<D,I> { public: typedef typename D::value_type T; L1Ball(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda) { }; inline void prox(const D& input, D& output, const T eta) const { D tmp; tmp.copy(input); if (this->_intercept) { tmp[tmp.n()-1]=0; tmp.sparseProject(output,_lambda,1,0,0,0,false); output[output.n()-1] = input[output.n()-1]; } else { tmp.sparseProject(output,_lambda,1,0,0,0,false); } }; inline T eval(const D& input) const { return 0; }; 
inline T fenchel(D& grad1, D& grad2) const { Vector<T> output; output.copy(grad2); if (this->_intercept) output[output.n()-1]=0; return _lambda*(output.fmaxval()); }; void print() const { cout << "L1 ball regularization" << endl; } virtual T lambda() const { return _lambda;}; private: const T _lambda; }; template <typename D, typename I> class L2Ball final : public Regularizer<D,I> { public: typedef typename D::value_type T; L2Ball(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda) { }; inline void prox(const D& input, D& output, const T eta) const { D tmp; tmp.copy(input); if (this->_intercept) { tmp[tmp.n()-1]=0; const T nrm = tmp.nrm2(); if (nrm > _lambda) tmp.scal(_lambda/nrm); output[output.n()-1] = input[output.n()-1]; } else { const T nrm = tmp.nrm2(); if (nrm > _lambda) tmp.scal(_lambda/nrm); } }; inline T eval(const D& input) const { return 0; }; inline T fenchel(D& grad1, D& grad2) const { Vector<T> output; output.copy(grad2); if (this->_intercept) output[output.n()-1]=0; return _lambda*(output.nrm2()); }; void print() const { cout << "L1 ball regularization" << endl; } virtual T lambda() const { return _lambda;}; private: const T _lambda; }; template <typename D, typename I> class FusedLasso final : public Regularizer<D,I> { public: typedef typename D::value_type T; FusedLasso(const ParamModel<T>& model) : Regularizer<D,I>(model), _lambda(model.lambda), _lambda2(model.lambda2), _lambda3(model.lambda3) { }; inline void prox(const D& x, D& output, const T eta) const { output.resize(x.n()); Vector<T> copyx; copyx.copy(x); copyx.fusedProjectHomotopy(output,_lambda2,_lambda,_lambda3,true); }; inline T eval(const D& x) const { T sum = T(); const int maxn = this->_intercept ? 
x.n()-1 : x.n(); for (int i = 0; i<maxn-1; ++i) sum += _lambda*abs(x[i+1]-x[i]) + _lambda2*abs(x[i]) + T(0.5)*_lambda3*x[i]*x[i]; sum += _lambda2*abs(x[maxn-1])+0.5*_lambda3*x[maxn-1]*x[maxn-1]; return sum; }; inline T fenchel(D& grad1, D& grad2) const { return 0; }; void print() const { cout << "Fused Lasso regularization" << endl; } bool provides_fenchel() const { return false; }; virtual T strong_convexity() const { return this->_intercept ? 0 : _lambda3; }; virtual T lambda() const { return _lambda;}; private: const T _lambda; const T _lambda2; const T _lambda3; }; template <typename Reg> class RegMat final : public Regularizer< Matrix<typename Reg::T>, typename Reg::index_type > { public: typedef typename Reg::T T; typedef typename Reg::index_type I; RegMat(const ParamModel<T>& model, const int num_cols, const bool transpose) : Regularizer< Matrix<T>, I >(model), _N(num_cols), _transpose(transpose) { _regs=new Reg*[_N]; for (int i = 0; i<_N; ++i) _regs[i]=new Reg(model); }; virtual ~RegMat() { for (int i = 0; i<_N; ++i) { delete(_regs[i]); _regs[i]=NULL; } delete[](_regs); }; void inline prox(const Matrix<T>& x, Matrix<T>& y, const T eta) const { y.copy(x); int i; #pragma omp parallel for private(i) for (i = 0; i<_N; ++i) { Vector<T> colx, coly; if (_transpose) { x.copyRow(i,colx); y.copyRow(i,coly); } else { x.refCol(i,colx); y.refCol(i,coly); } _regs[i]->prox(colx,coly,eta); if (_transpose) y.copyToRow(i,coly); } }; T inline eval(const Matrix<T>& x) const { T sum = 0; #pragma omp parallel for reduction(+ : sum) for (int i = 0; i<_N; ++i) { Vector<T> col; if (_transpose) { x.copyRow(i,col); } else { x.refCol(i,col); } const T val = _regs[i]->eval(col); sum += val; } return sum; }; T inline fenchel(Matrix<T>& grad1, Matrix<T>& grad2) const { T sum=0; #pragma omp parallel for reduction(+ : sum) for (int i = 0; i<_N; ++i) { Vector<T> col1, col2; if (_transpose) { grad1.copyRow(i,col1); grad2.copyRow(i,col2); } else { grad1.refCol(i,col1); grad2.refCol(i,col2); } 
const T fench=_regs[i]->fenchel(col1,col2); sum+=fench; if (_transpose) { grad1.copyToRow(i,col1); grad2.copyToRow(i,col2); } } return sum; }; virtual bool provides_fenchel() const { bool ok=true; for (int i = 0; i<_N; ++i) ok = ok && _regs[i]->provides_fenchel(); return ok; }; void print() const { cout << "Regularization for matrices" << endl; _regs[0]->print(); }; virtual T lambda() const { return _regs[0]->lambda();}; inline void lazy_prox(const Matrix<T>& input, Matrix<T>& output, const Vector<I>& indices, const T eta) const { #pragma omp parallel for for (int i = 0; i<_N; ++i) { Vector<T> colx, coly; output.refCol(i,coly); if (_transpose) { input.copyRow(i,colx); } else { input.refCol(i,colx); } _regs[i]->lazy_prox(colx,coly,indices,eta); } }; virtual bool is_lazy() const { return _regs[0]->is_lazy(); }; protected: int _N; Reg** _regs; bool _transpose; }; template <typename Reg> class RegVecToMat final : public Regularizer< Matrix<typename Reg::T>, typename Reg::index_type > { public: typedef typename Reg::T T; typedef typename Reg::index_type I; typedef Matrix<T> D; RegVecToMat(const ParamModel<T>& model) : Regularizer<D,I>(model), _intercept(model.intercept) { ParamModel<T> model2=model; model2.intercept=false; _reg=new Reg(model2); }; ~RegVecToMat() { delete(_reg);}; inline void prox(const D& input, D& output, const T eta) const { Vector<T> w1, w2, b1, b2; output.resize(input.m(),input.n()); get_wb(input,w1,b1); get_wb(output,w2,b2); _reg->prox(w1,w2,eta); if (_intercept) b2.copy(b1); }; inline T eval(const D& input) const { Vector<T> w, b; get_wb(input,w,b); return _reg->eval(w); } inline T fenchel(D& grad1, D& grad2) const { Vector<T> g1; grad1.toVect(g1); Vector<T> w, b; get_wb(grad2,w,b); return (this->_intercept && ((b.nrm2sq()) > 1e-7) ? INFINITY : _reg->fenchel(g1,w)); }; void print() const { _reg->print(); } virtual T strong_convexity() const { return _intercept ? 
0 : _reg->strong_convexity(); }; virtual T lambda() const { return _reg->lambda(); }; inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const { Vector<T> w1, w2, b1, b2; output.resize(input.m(),input.n()); get_wb(input,w1,b1); get_wb(output,w2,b2); _reg->lazy_prox(w1,w2,indices,eta); if (_intercept) b2.copy(b1); }; virtual bool is_lazy() const { return _reg->is_lazy(); }; private: inline void get_wb(const Matrix<T>& input, Vector<T>& w, Vector<T>& b) const { const int p = input.n(); Matrix<T> W; if (_intercept) { input.refSubMat(0,p-1,W); input.refCol(p-1,b); } else { input.refSubMat(0,p,W); } W.toVect(w); }; Reg* _reg; const bool _intercept; }; template <typename T> struct normL2 { public: typedef T value_type; normL2(const ParamModel<T>& model) : _lambda(model.lambda) { }; inline void prox(Vector<T>& x, const T thrs) const { const T nrm=x.nrm2(); const T thrs2 = thrs*_lambda; if (nrm > thrs2) { x.scal((nrm-thrs2)/nrm); } else { x.setZeros(); } }; inline T eval(const Vector<T>& x) const { return _lambda*x.nrm2(); }; static inline void print() { cout << "L2"; }; inline T eval_dual(const Vector<T>& x) const { return x.nrm2()/_lambda; }; private: const T _lambda; }; template <typename T> struct normLinf { public: typedef T value_type; normLinf(const ParamModel<T>& model) : _lambda(model.lambda) { }; inline void prox(Vector<T>& x, const T thrs) const { Vector<T> z; x.l1project(z,thrs*_lambda); x.sub(z); }; inline T eval(const Vector<T>& x) const { return _lambda*x.fmaxval(); }; static inline void print() { cout << "LInf"; } inline T eval_dual(const Vector<T>& x) const { return x.asum()/_lambda; }; private: const T _lambda; }; template <typename T> struct normL2_L1 { public: typedef T value_type; normL2_L1(const ParamModel<T>& model) : _lambda(model.lambda), _lambda2(model.lambda2) { }; inline void prox(Vector<T>& x, const T thrs) const { x.fastSoftThrshold(x,thrs*_lambda2); const T nrm=x.nrm2(); const T thrs2 = thrs*_lambda; if 
(nrm > thrs2) { x.scal((nrm-thrs2)/nrm); } else { x.setZeros(); } }; inline T eval(const Vector<T>& x) const { return _lambda*x.nrm2()+_lambda2*x.asum(); }; static inline void print() { cout << "L2+L1"; }; inline T eval_dual(const Vector<T>& x) const { Vector<T> sorted_x; sorted_x.copy(x); sorted_x.abs_vec(); sorted_x.sort(false); const int n = x.n(); T lambda_gamma=0; T sum_sq=0; T sum_lin=0; for (int ii=0; ii<n; ++ii) { lambda_gamma=sorted_x[ii]; sum_lin+=lambda_gamma; sum_sq+=lambda_gamma*lambda_gamma; const T lambda_mu = _lambda*lambda_gamma/(_lambda2); if (sum_sq - 2*lambda_gamma*sum_lin + (ii+1)*lambda_gamma*lambda_gamma >= lambda_mu*lambda_mu) { sum_lin-=lambda_gamma; sum_sq -=lambda_gamma*lambda_gamma; const T dual=solve_binomial2((ii)*_lambda2*_lambda2-_lambda*_lambda,-2*sum_lin*_lambda2,sum_sq); return dual; } } return 0; }; private: const T _lambda; const T _lambda2; }; template <typename N, typename I> class MixedL1LN final : public Regularizer< Matrix<typename N::value_type>, I > { public: typedef typename N::value_type T; typedef Matrix<T> D; MixedL1LN(const ParamModel<T>& model, const int nclass, const bool transpose) : Regularizer<D,I>(model), _transpose(transpose), _lambda(model.lambda), _norm(model) { }; inline void prox(const D& x, D& y, const T eta) const { const int n = x.n(); const int m = x.m(); y.copy(x); if (_transpose) { const int nn = this->_intercept ? n-1 : n; #pragma omp parallel for for (int i = 0; i<nn; ++i) { Vector<T> col; y.refCol(i,col); _norm.prox(col,eta); } } else { const int nn = this->_intercept ? m-1 : m; #pragma omp parallel for for (int i = 0; i<nn; ++i) { Vector<T> row; y.copyRow(i,row); _norm.prox(row,eta); y.copyToRow(i,row); } } }; T inline eval(const D& x) const { T sum=0; const int n = x.n(); const int m = x.m(); if (_transpose) { const int nn = this->_intercept ? 
n-1 : n; #pragma omp parallel for reduction(+ : sum) for (int i = 0; i<nn; ++i) { Vector<T> col; x.refCol(i,col); sum+=_norm.eval(col); } } else { const int nn = this->_intercept ? m-1 : m; #pragma omp parallel for reduction(+:sum) for (int i = 0; i<nn; ++i) { Vector<T> row; x.copyRow(i,row); sum+=_norm.eval(row); } } return sum; } // grad1 is nclasses * n inline T fenchel(D& grad1, D& grad2) const { const int n = grad2.n(); const int m = grad2.m(); T res=0; T mm=0; if (_transpose) { const int nn = this->_intercept ? n-1 : n; for (int i = 0; i<nn; ++i) { Vector<T> col; grad2.refCol(i,col); mm = MAX(_norm.eval_dual(col),mm); } Vector<T> col; if (this->_intercept) { grad2.refCol(nn,col); if (col.nrm2sq() > T(1e-7)) res=INFINITY; } } else { const int nn = this->_intercept ? m-1 : m; for (int i = 0; i<nn; ++i) { Vector<T> row; grad2.copyRow(i,row); mm = MAX(_norm.eval_dual(row),mm); } Vector<T> col; if (this->_intercept) { grad2.copyRow(nn,col); if (col.nrm2sq() > T(1e-7)) res=INFINITY; } } if (mm > T(1.0)) grad1.scal(T(1.0)/mm); return res; }; void print() const { cout << "Mixed L1-"; N::print(); cout << " norm regularization" << endl; } inline T lambda() const { return _lambda; }; inline void lazy_prox(const D& input, D& output, const Vector<I>& indices, const T eta) const { output.resize(input.m(),input.n()); const int r = indices.n(); const int m = input.m(); const int n = input.n(); if (_transpose) { #pragma omp parallel for for (int i = 0; i<r; ++i) { const int ind=indices[i]; Vector<T> col, col1; input.refCol(ind,col1); output.refCol(ind,col); col.copy(col1); _norm.prox(col,eta); } if (this->_intercept) { Vector<T> col, col1; input.refCol(n-1,col1); output.refCol(n-1,col); col.copy(col1); } } else { #pragma omp parallel for for (int i = 0; i<r; ++i) { const int ind=indices[i]; Vector<T> col; input.copyRow(ind,col); _norm.prox(col,eta); output.copyToRow(ind,col); } if (this->_intercept) { Vector<T> col; input.copyRow(m-1,col); output.copyToRow(m-1,col); } } }; 
virtual bool is_lazy() const { return true; }; private: const bool _transpose; const T _lambda; N _norm; }; template <typename T, typename I> using MixedL1L2=MixedL1LN< normL2<T>, I >; template <typename T, typename I> using MixedL1Linf=MixedL1LN< normLinf<T> , I>; template <typename T, typename I> using MixedL1L2_L1=MixedL1LN< normL2_L1<T>, I >; #endif
GB_binop__plus_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_fc64) // A.*B function (eWiseMult): GB (_AemultB_01__plus_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__plus_fc64) // A.*B function (eWiseMult): GB (_AemultB_03__plus_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc64) // A*D function (colscale): GB (_AxD__plus_fc64) // D*A function (rowscale): GB (_DxB__plus_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__plus_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__plus_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc64) // C=scalar+B GB (_bind1st__plus_fc64) // C=scalar+B' GB (_bind1st_tran__plus_fc64) // C=A+scalar GB (_bind2nd__plus_fc64) // C=A'+scalar GB (_bind2nd_tran__plus_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_add (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // 
true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_add (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_FC64 || GxB_NO_PLUS_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_fc64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_fc64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_add (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_add (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_add (x, aij) ; \ } GrB_Info GB (_bind1st_tran__plus_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_add (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__plus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
SparseDenseProduct.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H

namespace Eigen {

namespace internal {

// An outer product involving a sparse operand always produces a sparse result.
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct>  { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct>  { typedef Sparse ret; };

// Primary template for res += alpha * (sparse lhs) * (dense rhs).
// Dispatch is on the sparse operand's storage order and on ColPerCol
// (true when the dense rhs is column-major or a single column, in which
// case the result is produced one column at a time).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
         typename AlphaType,
         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;

// Row-major sparse lhs, one result column at a time: each res(i,c) is an
// independent dot product of sparse row i with dense column c, so rows can
// be processed in parallel.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);

    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif

    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }

  // res(i,col) += alpha * dot(sparse row i, rhs column col)
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }

};

// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
// {
//   enum {
//     Defined = 1
//   };
//   typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
// };

// Column-major sparse lhs, one result column at a time: scatter
// (alpha * rhs(j,c)) * (sparse column j) into res column c.
// AlphaType is kept generic here (see ScalarBinaryOpTraits for the
// mixed-scalar product type).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index c=0; c<rhs.cols(); ++c)
    {
      for(Index j=0; j<lhs.outerSize(); ++j)
      {
//        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
        typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
      }
    }
  }
};

// Row-major sparse lhs, row-major multi-column rhs: accumulate whole
// result rows (res row j += alpha * lhs(j,k) * rhs row k).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Res::RowXpr res_j(res.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res_j += (alpha*it.value()) * rhs.row(it.index());
    }
  }
};

// Column-major sparse lhs, row-major multi-column rhs: scatter rhs row j,
// scaled by each nonzero of sparse column j, into the matching result rows.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res.row(it.index()) += (alpha*it.value()) * rhs_j;
    }
  }
};

// Entry point: res += alpha * lhs * rhs, dispatching to the proper
// specialization above.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}

} // end namespace internal

namespace internal {

// Product evaluator glue: sparse * dense.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
 : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};

// dense * sparse is computed as (sparse' * dense')' so the kernels above
// can be reused.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);

    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
  : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};

// Lazy evaluator for a sparse-vector * dense-vector outer product; each
// "outer" slice is the sparse vector scaled by one dense coefficient.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;

  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;

  typedef evaluator<ActualLhs> LhsEval;
  typedef evaluator<ActualRhs> RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;

public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = HugeCost
  };

  // Iterates over the lhs nonzeros, multiplying each by the single rhs
  // factor selected for this outer index.
  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}

    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row()   const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col()   const { return NeedToTranspose ? LhsIterator::index() : m_outer; }

    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }

  protected:
    // dense rhs: the factor is simply its coefficient at "outer"
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }

    // sparse rhs: use its (single) stored value, or mark the slice empty
    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }

    Index m_outer;
    bool m_empty;
    Scalar m_factor;
  };

  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

protected:
  const LhsArg m_lhs;
  evaluator<ActualLhs> m_lhsXprImpl;
  evaluator<ActualRhs> m_rhsXprImpl;
};

// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}

};

// dense * sparse outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}

};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEDENSEPRODUCT_H
matmul_double.c
/*
 * Square matrix multiplication
 * A[N][N] * B[N][N] = C[N][N]
 *
 * Both kernels compute C = A * B', i.e. they expect their second
 * argument to be the TRANSPOSE of B so the inner loop streams both
 * operands row-wise; main() builds BT before timing.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/timeb.h>

#define N 512
//#define N 16

/* Wall-clock time in seconds (millisecond resolution via ftime). */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* Fill an N x N matrix with pseudo-random doubles in [0, 10]. */
void init(double **A) {
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            A[i][j] = (double) rand() / (double) (RAND_MAX / 10.0);
        }
    }
}

/* C = A * B' with the inner dot product vectorized via OpenMP SIMD.
 * B must already be transposed (see file header). */
void matmul_simd(double **A, double **B, double **C) {
    int i, j, k;
    double temp;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            temp = 0;
#pragma omp simd reduction(+:temp)
            for (k = 0; k < N; k++) {
                temp += A[i][k] * B[j][k];
            }
            C[i][j] = temp;
        }
    }
}

/* Debug helper: print the top-left 8x8 corner of a matrix. */
void print_matrix(double **matrix) {
    for (int i = 0; i < 8; i++) {
        printf("[");
        for (int j = 0; j < 8; j++) {
            printf("%.2f ", matrix[i][j]);
        }
        puts("]");
    }
    puts("");
}

/* Scalar reference kernel: C = A * B' (B pre-transposed, as above). */
void matmul_serial(double **A, double **B, double **C) {
    int i, j, k;
    double temp;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            temp = 0;
            for (k = 0; k < N; k++) {
                temp += A[i][k] * B[j][k];
            }
            C[i][j] = temp;
        }
    }
}

/*
 * Sum of element-wise differences between A and B.
 * BUG FIX: accumulate |A - B| instead of the signed difference; with a
 * signed sum, errors of opposite sign cancel, so a wrong result could
 * still report ~0.
 */
double check(double **A, double **B) {
    double difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            difference += fabs(A[i][j] - B[i][j]);
        }
    }
    return difference;
}

/* malloc that aborts on failure so the kernels never see NULL rows
 * (the original ignored malloc failure entirely). */
static void *xmalloc(size_t size) {
    void *p = malloc(size);
    if (p == NULL) {
        fprintf(stderr, "out of memory\n");
        exit(EXIT_FAILURE);
    }
    return p;
}

int main(int argc, char *argv[]) {
    /* Allocate the five N x N matrices as arrays of row pointers. */
    double **A = xmalloc(sizeof(double *) * N);
    double **B = xmalloc(sizeof(double *) * N);
    double **C_simd = xmalloc(sizeof(double *) * N);
    double **C_serial = xmalloc(sizeof(double *) * N);
    double **BT = xmalloc(sizeof(double *) * N);
    for (int i = 0; i < N; i++) {
        A[i] = xmalloc(sizeof(double) * N);
        B[i] = xmalloc(sizeof(double) * N);
        C_simd[i] = xmalloc(sizeof(double) * N);
        C_serial[i] = xmalloc(sizeof(double) * N);
        BT[i] = xmalloc(sizeof(double) * N);
    }

    srand(time(NULL));
    init(A);
    init(B);

    /* BT = B' so both kernels read rows of both operands contiguously. */
    for (int line = 0; line < N; line++) {
        for (int col = 0; col < N; col++) {
            BT[line][col] = B[col][line];
        }
    }

    int i;
    int num_runs = 10;

    double elapsed = read_timer();
    for (i = 0; i < num_runs; i++) matmul_simd(A, BT, C_simd);
    elapsed = (read_timer() - elapsed);

    double elapsed_serial = read_timer();
    for (i = 0; i < num_runs; i++) matmul_serial(A, BT, C_serial);
    elapsed_serial = (read_timer() - elapsed_serial);

    print_matrix(A);
    print_matrix(BT);
    puts("=\n");
    print_matrix(C_simd);
    puts("---------------------------------");
    print_matrix(C_serial);

    /* 2*N^3 flops per multiply, num_runs multiplies, reported in GFLOPS. */
    double gflops_omp = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed));
    double gflops_serial = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed_serial));
    printf("======================================================================================================\n");
    printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_omp:\t\t%4f\t%4f\n", elapsed, gflops_omp);
    printf("matmul_serial:\t\t%4f\t%4f\n", elapsed_serial, gflops_serial);
    printf("Correctness check: %f\n", check(C_simd, C_serial));

    /* Release everything (the original leaked all rows and pointer arrays). */
    for (int r = 0; r < N; r++) {
        free(A[r]); free(B[r]); free(C_simd[r]); free(C_serial[r]); free(BT[r]);
    }
    free(A); free(B); free(C_simd); free(C_serial); free(BT);
    return 0;
}
GB_unaryop__one_bool_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_bool_bool // op(A') function: GB_tran__one_bool_bool // C type: bool // A type: bool // cast: ; // unaryop: cij = true #define GB_ATYPE \ bool #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_bool_bool ( bool *Cx, // Cx and Ax may be aliased bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_bool_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
single2.c
/*
 * a pragma immediately following another
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* "single" nested directly inside "parallel": exactly one thread of the
 * team executes the assignment that follows the stacked pragmas.
 * NOTE(review): omp_get_num_threads() is called even when _OPENMP is not
 * defined (no fallback macro here) -- presumably this file is only
 * compiled with OpenMP enabled; confirm against the test harness. */
void foo(void)
{
    int num_threads =0;
#pragma omp parallel
#pragma omp single
    num_threads = omp_get_num_threads();
}

/* Three stacked pragmas: parallel -> single (nowait) -> untied task, all
 * attached to the single recursive call statement.
 * NOTE(review): there is no base case, so running this recurses forever;
 * this looks like a compiler/parser test fixture for consecutive pragmas
 * rather than code meant to execute -- confirm before "fixing". */
void sort_par (int size)
{
#pragma omp parallel
#pragma omp single nowait
#pragma omp task untied
    sort_par(size);
}
private-clauseModificado3.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/*
 * Demo of the (deliberately commented-out) private clause: "suma" is
 * shared by all threads, so the accumulation below is a data race and
 * the printed per-thread and final sums are nondeterministic under
 * OpenMP. That race is the point of the exercise and is kept as-is.
 *
 * Fixes vs. the original:
 *  - "int main" instead of implicit-int "main()" (invalid since C99),
 *    plus an explicit return.
 *  - the final printf passed "suma" but had no %d conversion, so the
 *    value was never printed; the format string now consumes it.
 */
int main() {
    int i, n = 7;
    int a[n], suma;

    for (i = 0; i < n; i++)
        a[i] = i;

#pragma omp parallel //private(suma)
    {
        suma = 0;  /* race: every thread resets the shared accumulator */
#pragma omp for
        for (i = 0; i < n; i++) {
            suma = suma + a[i];
            printf ("thread %d suma a[%d]/ ", omp_get_thread_num(), i);
        }
        printf("\nthread %d suma= %d", omp_get_thread_num(), suma);
    }
    /* BUG FIX: original was printf("suma:\n", suma) -- value dropped */
    printf("suma: %d\n", suma);
    return 0;
}
GB_binop__min_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__min_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint8) // A*D function (colscale): GB (_AxD__min_uint8) // D*A function (rowscale): GB (_DxB__min_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__min_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__min_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint8) // C=scalar+B GB (_bind1st__min_uint8) // C=scalar+B' GB (_bind1st_tran__min_uint8) // C=A+scalar GB (_bind2nd__min_uint8) // C=A'+scalar GB (_bind2nd_tran__min_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C 
and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_UINT8 || GxB_NO_MIN_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
// NOTE(review): this span begins INSIDE GB (_AxB_colscale__min_uint8); its name
// and leading parameters are above this chunk.  Only the tail is visible here.
// This whole file is auto-generated (see header above this chunk): each kernel
// body is pulled in via a template #include, with the MIN/uint8 semantics bound
// through the GB_* macros defined earlier in the file.
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__min_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__min_uint8)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // scratch workspaces, sliced per-thread; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__min_uint8)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__min_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    { 
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    { 
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (MIN is commutative, so this branch is the one compiled here.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__min_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__min_uint8)
(
    GrB_Matrix C, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__min_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    { 
        // skip entries not present in the bitmap (GBB handles Bb == NULL)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__min_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IMIN (x, aij) ;                \
}

GrB_Info GB (_bind1st_tran__min_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IMIN (aij, y) ;                \
}

GrB_Info GB (_bind2nd_tran__min_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
update_ops_pauli_multi.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif /** * perform multi_qubit_Pauli_gate with XZ mask. * * This function assumes bit_flip_mask is not 0, i.e., at least one bit is flipped. If no bit is flipped, use multi_qubit_Pauli_gate_Z_mask. * This function update the quantum state with Pauli operation. * bit_flip_mask, phase_flip_mask, global_phase_90rot_count, and pivot_qubit_index must be computed before calling this function. * See get_masks_from_*_list for the above four arguemnts. */ void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, CTYPE* state, ITYPE dim); void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CTYPE* state, ITYPE dim); void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CTYPE* state, ITYPE dim); void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CTYPE* state, ITYPE dim); void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, CTYPE* state, ITYPE dim) { // pivot mask const ITYPE pivot_mask = 1ULL << pivot_qubit_index; // loop varaibles const ITYPE loop_dim = dim / 2; ITYPE state_index; const ITYPE mask = (1ULL << pivot_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; #ifdef _OPENMP UINT threshold = 14; UINT default_thread_count = omp_get_max_threads(); if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1); #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); // gather index ITYPE basis_1 = basis_0 ^ bit_flip_mask; // determine sign UINT 
sign_0 = count_population(basis_0 & phase_flip_mask) % 2; UINT sign_1 = count_population(basis_1 & phase_flip_mask) % 2; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = cval_1 * PHASE_M90ROT[(global_phase_90rot_count + sign_0 * 2) % 4]; state[basis_1] = cval_0 * PHASE_M90ROT[(global_phase_90rot_count + sign_1 * 2) % 4]; } #ifdef _OPENMP omp_set_num_threads(default_thread_count); #endif } void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CTYPE* state, ITYPE dim) { // pivot mask const ITYPE pivot_mask = 1ULL << pivot_qubit_index; // loop varaibles const ITYPE loop_dim = dim / 2; ITYPE state_index; const ITYPE mask = (1ULL << pivot_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; // coefs const double cosval = cos(angle / 2); const double sinval = sin(angle / 2); #ifdef _OPENMP UINT threshold = 14; UINT default_thread_count = omp_get_max_threads(); if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1); #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); // gather index ITYPE basis_1 = basis_0 ^ bit_flip_mask; // determine parity int bit_parity_0 = count_population(basis_0 & phase_flip_mask) % 2; int bit_parity_1 = count_population(basis_1 & phase_flip_mask) % 2; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = cosval * cval_0 + 1.i * sinval * cval_1 * PHASE_M90ROT[(global_phase_90rot_count + bit_parity_0 * 2) % 4]; state[basis_1] = cosval * cval_1 + 1.i * sinval * cval_0 * PHASE_M90ROT[(global_phase_90rot_count + bit_parity_1 * 2) % 4]; } #ifdef _OPENMP omp_set_num_threads(default_thread_count); #endif } void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CTYPE* 
state, ITYPE dim) { // loop varaibles const ITYPE loop_dim = dim; ITYPE state_index; #ifdef _OPENMP UINT threshold = 14; UINT default_thread_count = omp_get_max_threads(); if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1); #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // determine parity int bit_parity = count_population(state_index & phase_flip_mask) % 2; // set values if (bit_parity % 2 == 1) { state[state_index] *= -1; } } #ifdef _OPENMP omp_set_num_threads(default_thread_count); #endif } void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CTYPE* state, ITYPE dim) { // loop variables const ITYPE loop_dim = dim; ITYPE state_index; // coefs const double cosval = cos(angle / 2); const double sinval = sin(angle / 2); #ifdef _OPENMP UINT threshold = 14; UINT default_thread_count = omp_get_max_threads(); if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1); #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // determine sign int bit_parity = count_population(state_index & phase_flip_mask) % 2; int sign = 1 - 2 * bit_parity; // set value state[state_index] *= cosval + (CTYPE)sign * 1.i * sinval; } #ifdef _OPENMP omp_set_num_threads(default_thread_count); #endif } void multi_qubit_Pauli_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, CTYPE* state, ITYPE dim) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_gate_Z_mask(phase_flip_mask, state, dim); } else { multi_qubit_Pauli_gate_XZ_mask(bit_flip_mask, phase_flip_mask, 
global_phase_90rot_count, pivot_qubit_index, state, dim); } } void multi_qubit_Pauli_gate_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, CTYPE* state, ITYPE dim) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_gate_Z_mask(phase_flip_mask, state, dim); } else { multi_qubit_Pauli_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim); } } void multi_qubit_Pauli_rotation_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, double angle, CTYPE* state, ITYPE dim) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_rotation_gate_Z_mask(phase_flip_mask, angle, state, dim); } else { multi_qubit_Pauli_rotation_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state, dim); } } void multi_qubit_Pauli_rotation_gate_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, double angle, CTYPE* state, ITYPE dim) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { 
multi_qubit_Pauli_rotation_gate_Z_mask(phase_flip_mask, angle, state, dim); } else { multi_qubit_Pauli_rotation_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state, dim); } }
main.c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <omp.h>

#define NDEBUG

#ifndef NDEBUG
#define DEBUG(cmd) cmd;
#else
#define DEBUG(cmd) ;
#endif

// Return true iff n is prime.
// Fixes over the original: n < 2 now correctly reports non-prime (the old
// version returned true for 0 and 1 because its loop body never ran), and
// trial division stops at sqrt(n) instead of n, turning O(n) into O(sqrt(n)).
// The multiplication is done in `long` so i*i cannot overflow for n near INT_MAX.
bool is_prime(int n) {
    DEBUG(
        int thread_id = omp_get_thread_num();
        printf("[Thread %.2d] Check number %d\n", thread_id, n);
    )
    if (n < 2) {
        return false;
    }
    for (int i = 2; (long)i * i <= n; i++) {
        if (n % i == 0) {
            return false;
        }
    }
    return true;
}

int main(int argc, char** argv) {
    int n = 10;  // default upper bound of the range
    if (argc == 2) {
        n = atoi(argv[1]);  // read n from command-line input
    }

    int primes_len = 0;  // result: count of primes in [2, n]

    // dynamic scheduling: the cost of is_prime grows with i, so static
    // chunking would leave the highest-numbered thread with most of the work
    #pragma omp parallel for schedule(dynamic) reduction(+:primes_len)
    for (int i = 2; i <= n; i++) {
        if (is_prime(i)) {
            primes_len++;
        }
    }

    printf("The number of primes in range 1 ... %d: %d\n", n, primes_len);
    return 0;
}
GB_unop__identity_uint64_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_uint16)
// op(A') function:  GB (_unop_tran__identity_uint64_uint16)

// C type:   uint64_t
// A type:   uint16_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CAST(z, aij)   \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)   \
{                                   \
    /* aij = Ax [pA] */             \
    uint16_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ;   \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (false here: the uint16 -> uint64 cast widens the value)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint64_uint16)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    { 
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    { 
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint64_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel uses the GB_CAST_OP macro defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
par_strength.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 *****************************************************************************/

/* following should be in a header file */

#include "_hypre_parcsr_ls.h"
#include "hypre_hopscotch_hash.h"

/*==========================================================================*/
/*==========================================================================*/
/**
  Generates strength matrix

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
  \item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix".  This is
  used in the coarsening and interpolation routines.
  \end{itemize}
  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric.  This is because of the directional nature of the
  "strength of dependence" notion (see below).  Since we are using
  nonsymmetric storage for A right now, this is not a problem.  If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.
  \item This routine currently "compresses" the strength matrix.  We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A.  To do this, we could use the same
  A\_i and A\_j arrays, and would need only define the S\_data array.
  There are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $-a_{ij} >= \theta \max_{l \ne j} \{-a_{il}\}$.
  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength".  We also retain the "natural" graph
  notation for representing the directed graph of a matrix.  That is,
  the nonzero entry $a_{ij}$ is represented as: i --> j.  In the strength
  matrix, S, the entry $s_{ij}$ is also graphically denoted as above,
  and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_{ij}$
  \item $j$ "influences" $i$ with "strength" $s_{ij}$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.

  @param A [IN]
  coefficient matrix
  @param strength_threshold [IN]
  threshold parameter used to define strength
  @param max_row_sum [IN]
  parameter used to modify definition of strength for diagonal dominant matrices
  @param S_ptr [OUT]
  strength matrix

  @see */
/*--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSHost(hypre_ParCSRMatrix    *A,
                           HYPRE_Real             strength_threshold,
                           HYPRE_Real             max_row_sum,
                           HYPRE_Int              num_functions,
                           HYPRE_Int             *dof_func,
                           hypre_ParCSRMatrix   **S_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime();
#endif

   MPI_Comm                 comm            = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg     *comm_pkg        = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle  *comm_handle;
   hypre_CSRMatrix         *A_diag          = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int               *A_diag_i        = hypre_CSRMatrixI(A_diag);
   HYPRE_Real              *A_diag_data     = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix         *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int               *A_offd_i        = hypre_CSRMatrixI(A_offd);
   HYPRE_Real              *A_offd_data = NULL;
   HYPRE_Int               *A_diag_j        = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int               *A_offd_j        = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt            *row_starts      = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int                num_variables   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt             global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int                num_nonzeros_diag;
   HYPRE_Int                num_nonzeros_offd = 0;
   HYPRE_Int                num_cols_offd = 0;

   hypre_ParCSRMatrix *S;
   hypre_CSRMatrix    *S_diag;
   HYPRE_Int          *S_diag_i;
   HYPRE_Int          *S_diag_j;
   /* HYPRE_Real         *S_diag_data; */
   hypre_CSRMatrix    *S_offd;
   HYPRE_Int          *S_offd_i = NULL;
   HYPRE_Int          *S_offd_j = NULL;
   /* HYPRE_Real         *S_offd_data; */

   HYPRE_Real          diag, row_scale, row_sum;
   HYPRE_Int           i, jA, jS;

   HYPRE_Int           ierr = 0;

   HYPRE_Int          *dof_func_offd;
   HYPRE_Int           num_sends;
   HYPRE_Int          *int_buf_data;
   HYPRE_Int           index, start, j;

   /* scratch array for hypre_prefix_sum_pair (2 counters per thread) */
   HYPRE_Int *prefix_sum_workspace;

   /*--------------------------------------------------------------
    * Compute a  ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];

   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);

   /* row_starts is owned by A, col_starts = row_starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S,0);
   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);

   S_diag_i = hypre_CSRMatrixI(S_diag);
   /* NOTE(review): S_temp_diag_j aliases hypre_CSRMatrixJ(S_diag); it holds the
      uncompressed pattern (-1 = weak).  The compressed column array S_diag_j is
      allocated separately and swapped in at the end of this routine. */
   HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);
   S_diag_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   HYPRE_Int *S_temp_offd_j = NULL;

   dof_func_offd = NULL;

   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      S_temp_offd_j = hypre_CSRMatrixJ(S_offd);

      HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
      if (num_functions > 1)
      {
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }

      S_offd_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);

      /* S shares the same off-processor column map as A */
      HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols_offd; i++)
      {
         col_map_offd_S[i] = col_map_offd_A[i];
      }
   }

   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST);

   /* give S same nonzero structure as A */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS)
#endif
   {
      HYPRE_Int start, stop;
      hypre_GetSimpleThreadPartition(&start, &stop, num_variables);
      /* jS_diag / jS_offd count this thread's strong entries; after
         hypre_prefix_sum_pair below they become this thread's global offsets */
      HYPRE_Int jS_diag = 0, jS_offd = 0;

      for (i = start; i < stop; i++)
      {
         /* per-thread-local row pointers; shifted by the prefix-sum offset
            in the compression pass below */
         S_diag_i[i] = jS_diag;
         if (num_cols_offd)
         {
            S_offd_i[i] = jS_offd;
         }

         /* first entry of each diag row is assumed to be the diagonal */
         diag = A_diag_data[A_diag_i[i]];

         /* compute scaling factor and row sum */
         row_scale = 0.0;
         row_sum = diag;
         if (num_functions > 1)
         {
            if (diag < 0)
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[jA]])
                  {
                     row_scale = hypre_max(row_scale, A_diag_data[jA]);
                     row_sum += A_diag_data[jA];
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
                  {
                     row_scale = hypre_max(row_scale, A_offd_data[jA]);
                     row_sum += A_offd_data[jA];
                  }
               }
            }
            else
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[jA]])
                  {
                     row_scale = hypre_min(row_scale, A_diag_data[jA]);
                     row_sum += A_diag_data[jA];
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
                  {
                     row_scale = hypre_min(row_scale, A_offd_data[jA]);
                     row_sum += A_offd_data[jA];
                  }
               }
            } /* diag >= 0 */
         } /* num_functions > 1 */
         else
         {
            if (diag < 0)
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  row_scale = hypre_max(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  row_scale = hypre_max(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            }
            else
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  row_scale = hypre_min(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  row_scale = hypre_min(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            } /* diag >= 0*/
         } /* num_functions <= 1 */

         /* optimistically assume every off-diagonal entry is strong, then
            subtract the weak ones below */
         jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1;
         jS_offd += A_offd_i[i + 1] - A_offd_i[i];

         /* compute row entries of S */
         S_temp_diag_j[A_diag_i[i]] = -1;   /* the diagonal is never strong */
         if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
         {
            /* make all dependencies weak */
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               S_temp_diag_j[jA] = -1;
            }
            jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1);

            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               S_temp_offd_j[jA] = -1;
            }
            jS_offd -= A_offd_i[i + 1] - A_offd_i[i];
         }
         else
         {
            if (num_functions > 1)
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     if (A_diag_data[jA] <= strength_threshold * row_scale
                         || dof_func[i] != dof_func[A_diag_j[jA]])
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     if (A_offd_data[jA] <= strength_threshold * row_scale
                         || dof_func[i] != dof_func_offd[A_offd_j[jA]])
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               }
               else
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     if (A_diag_data[jA] >= strength_threshold * row_scale
                         || dof_func[i] != dof_func[A_diag_j[jA]])
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     if (A_offd_data[jA] >= strength_threshold * row_scale
                         || dof_func[i] != dof_func_offd[A_offd_j[jA]])
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               } /* diag >= 0 */
            } /* num_functions > 1 */
            else
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     if (A_diag_data[jA] <= strength_threshold * row_scale)
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     if (A_offd_data[jA] <= strength_threshold * row_scale)
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               }
               else
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     if (A_diag_data[jA] >= strength_threshold * row_scale)
                     {
                        S_temp_diag_j[jA] = -1;
                        --jS_diag;
                     }
                     else
                     {
                        S_temp_diag_j[jA] = A_diag_j[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     if (A_offd_data[jA] >= strength_threshold * row_scale)
                     {
                        S_temp_offd_j[jA] = -1;
                        --jS_offd;
                     }
                     else
                     {
                        S_temp_offd_j[jA] = A_offd_j[jA];
                     }
                  }
               } /* diag >= 0 */
            } /* num_functions <= 1 */
         } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */
      } /* for each variable */

      /* turn the per-thread counts into global offsets; also writes the
         grand totals into S_diag_i[num_variables] / S_offd_i[num_variables] */
      hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace);

      /*--------------------------------------------------------------
       * "Compress" the strength matrix.
       *
       * NOTE: S has *NO DIAGONAL ELEMENT* on any row.  Caveat Emptor!
       *
       * NOTE: This "compression" section of code may be removed, and
       * coarsening will still be done correctly.  However, the routine
       * that builds interpolation would have to be modified first.
       *----------------------------------------------------------------*/

      for (i = start; i < stop; i++)
      {
         S_diag_i[i] += jS_diag;
         S_offd_i[i] += jS_offd;

         jS = S_diag_i[i];
         for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
         {
            if (S_temp_diag_j[jA] > -1)
            {
               S_diag_j[jS]    = S_temp_diag_j[jA];
               jS++;
            }
         }

         jS = S_offd_i[i];
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (S_temp_offd_j[jA] > -1)
            {
               S_offd_j[jS]    = S_temp_offd_j[jA];
               jS++;
            }
         }
      } /* for each variable */

   } /* omp parallel */

   hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables];
   hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables];
   /* swap the compressed column arrays into S; this frees nothing here —
      the uncompressed S_temp_*_j arrays are freed explicitly below */
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   hypre_CSRMatrixJ(S_offd) = S_offd_j;

   hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;

   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr        = S;

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/* ----------------------------------------------------------------------- */
HYPRE_Int
hypre_BoomerAMGCreateS(hypre_ParCSRMatrix    *A,
                       HYPRE_Real             strength_threshold,
                       HYPRE_Real             max_row_sum,
                       HYPRE_Int              num_functions,
                       HYPRE_Int             *dof_func,
                       hypre_ParCSRMatrix   **S_ptr)
{
#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPushRange("CreateS");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA)
   /* dispatch to the device kernel when A's diag lives in device memory */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGCreateSDevice(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGCreateSHost(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr);
   }

#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPopRange();
#endif

   return ierr;
}

/* 
----------------------------------------------------------------------- */ /* Create Strength matrix from CF marker array data. Provides a more general form to build S for specific nodes of the 'global' matrix (for example, F points or A_FF part), given the entire matrix. These nodes have the SMRK tag. Could possibly be merged with BoomerAMGCreateS() to yield a more general function. */ HYPRE_Int hypre_BoomerAMGCreateSFromCFMarker(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int *CF_marker, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int SMRK, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Int *dof_func_offd = NULL; HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jj, jA, jS; HYPRE_Int num_sends, start, j, index; HYPRE_Int *int_buf_data; HYPRE_Int ierr = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int 
                          *prefix_sum_workspace;
   HYPRE_Int               my_id;

   /*--------------------------------------------------------------
    * Compute a ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   hypre_MPI_Comm_rank(comm, &my_id);

   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];

   /* S inherits A's row partitioning and (initially) A's sparsity sizes. */
   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
   /* row_starts is owned by A, col_starts = row_starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S,0);

   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);

   S_diag_i = hypre_CSRMatrixI(S_diag);
   /* S_temp_diag_j holds the uncompressed pattern (entries marked -1 are
      dropped later); the separately allocated S_diag_j receives the
      compressed column indices. */
   HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);
   S_diag_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   HYPRE_Int *S_temp_offd_j = NULL;

   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      S_temp_offd_j = hypre_CSRMatrixJ(S_offd);

      HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
      if (num_functions > 1)
      {
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd,
                                       HYPRE_MEMORY_HOST);
      }
      S_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);

      /* S shares A's off-diagonal (global) column numbering. */
      HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols_offd; i++)
      {
         col_map_offd_S[i] = col_map_offd_A[i];
      }
   }

   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      /* Job 11: forward exchange of integer data into the ghost array. */
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] =
            CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST);

   /* give S same nonzero structure as A */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS)
#endif
   {
      /* Each thread marks its contiguous slice of rows, counting kept
         entries in jS_diag/jS_offd; entries set to -1 in the S_temp_*
         arrays are dropped by the compression pass that follows. */
      HYPRE_Int start, stop;
      hypre_GetSimpleThreadPartition(&start, &stop, num_variables);
      HYPRE_Int jS_diag = 0, jS_offd = 0;

      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] == SMRK)
         {
            S_diag_i[i] = jS_diag;
            if (num_cols_offd)
            {
               S_offd_i[i] = jS_offd;
            }
            diag = A_diag_data[A_diag_i[i]];

            /* compute scaling factor and row sum; only couplings to other
               SMRK-tagged points (and matching dof_func, if relevant) count */
            row_scale = 0.0;
            row_sum = diag;
            if (num_functions > 1)
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     jj = A_diag_j[jA];
                     if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj]))
                     {
                        row_scale = hypre_max(row_scale, A_diag_data[jA]);
                        row_sum += A_diag_data[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     jj = A_offd_j[jA];
                     if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[jj]))
                     {
                        row_scale = hypre_max(row_scale, A_offd_data[jA]);
                        row_sum += A_offd_data[jA];
                     }
                  }
               } /* diag < 0 */
               else
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     jj = A_diag_j[jA];
                     if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj]))
                     {
                        row_scale = hypre_min(row_scale, A_diag_data[jA]);
                        row_sum += A_diag_data[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     jj = A_offd_j[jA];
                     if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[A_offd_j[jA]]))
                     {
                        row_scale = hypre_min(row_scale, A_offd_data[jA]);
                        row_sum += A_offd_data[jA];
                     }
                  }
               } /* diag >= 0 */
            } /* num_functions > 1 */
            else
            {
               if (diag < 0)
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     jj = A_diag_j[jA];
                     if (CF_marker[jj] == SMRK)
                     {
                        row_scale = hypre_max(row_scale, A_diag_data[jA]);
                        row_sum += A_diag_data[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     jj = A_offd_j[jA];
                     if (CF_marker_offd[jj] == SMRK)
                     {
                        row_scale = hypre_max(row_scale, A_offd_data[jA]);
                        row_sum += A_offd_data[jA];
                     }
                  }
               } /* diag < 0 */
               else
               {
                  for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                  {
                     jj = A_diag_j[jA];
                     if (CF_marker[jj] == SMRK)
                     {
                        row_scale = hypre_min(row_scale, A_diag_data[jA]);
                        row_sum += A_diag_data[jA];
                     }
                  }
                  for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                  {
                     jj = A_offd_j[jA];
                     if (CF_marker_offd[jj] == SMRK)
                     {
                        row_scale = hypre_min(row_scale, A_offd_data[jA]);
                        row_sum += A_offd_data[jA];
                     }
                  }
               } /* diag >= 0 */
            } /* num_functions <= 1 */

            /* compute row entries of S (the diagonal entry is always dropped) */
            S_temp_diag_j[A_diag_i[i]] = -1;
            if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
            {
               /* make all dependencies weak */
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  S_temp_diag_j[jA] = -1;
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  S_temp_offd_j[jA] = -1;
               }
            }
            else
            {
               if (num_functions > 1)
               {
                  if (diag < 0)
                  {
                     for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                     {
                        jj = A_diag_j[jA];
                        if (CF_marker[jj] == SMRK)
                        {
                           if ((A_diag_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj]))
                           {
                              S_temp_diag_j[jA] = -1;
                           }
                           else
                           {
                              S_temp_diag_j[jA] = jj;
                              ++jS_diag;
                           }
                        }
                        else
                        {
                           S_temp_diag_j[jA] = -1;
                        }
                     }
                     for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                     {
                        jj = A_offd_j[jA];
                        if (CF_marker_offd[jj] == SMRK)
                        {
                           if ((A_offd_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj]))
                           {
                              S_temp_offd_j[jA] = -1;
                           }
                           else
                           {
                              S_temp_offd_j[jA] = jj;
                              ++jS_offd;
                           }
                        }
                        else
                        {
                           S_temp_offd_j[jA] = -1;
                        }
                     }
                  } /* end diag < 0 */
                  else
                  {
                     for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                     {
                        jj = A_diag_j[jA];
                        if (CF_marker[jj] == SMRK)
                        {
                           if ((A_diag_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj]))
                           {
                              S_temp_diag_j[jA] = -1;
                           }
                           else
                           {
                              S_temp_diag_j[jA] = jj;
                              ++jS_diag;
                           }
                        }
                        else
                        {
                           S_temp_diag_j[jA] = -1;
                        }
                     }
                     for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                     {
                        jj = A_offd_j[jA];
                        if (CF_marker_offd[jj] == SMRK)
                        {
                           if ((A_offd_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj]))
                           {
                              S_temp_offd_j[jA] = -1;
                           }
                           else
                           {
                              S_temp_offd_j[jA] = jj;
                              ++jS_offd;
                           }
                        }
                        else
                        {
                           S_temp_offd_j[jA] = -1;
                        }
                     }
                  } /* diag >= 0 */
               } /* num_functions > 1 */
               else
               {
                  if (diag < 0)
                  {
                     for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                     {
                        jj = A_diag_j[jA];
                        if (CF_marker[jj] == SMRK)
                        {
                           if (A_diag_data[jA] <= strength_threshold * row_scale)
                           {
                              S_temp_diag_j[jA] = -1;
                           }
                           else
                           {
                              S_temp_diag_j[jA] = jj;
                              ++jS_diag;
                           }
                        }
                        else
                        {
                           S_temp_diag_j[jA] = -1;
                        }
                     }
                     for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                     {
                        jj = A_offd_j[jA];
                        if (CF_marker_offd[jj] == SMRK)
                        {
                           if (A_offd_data[jA] <= strength_threshold * row_scale)
                           {
                              S_temp_offd_j[jA] = -1;
                           }
                           else
                           {
                              S_temp_offd_j[jA] = jj;
                              ++jS_offd;
                           }
                        }
                        else
                        {
                           S_temp_offd_j[jA] = -1;
                        }
                     }
                  } /* diag < 0 */
                  else
                  {
                     for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
                     {
                        jj = A_diag_j[jA];
                        if (CF_marker[jj] == SMRK)
                        {
                           if (A_diag_data[jA] >= strength_threshold * row_scale)
                           {
                              S_temp_diag_j[jA] = -1;
                           }
                           else
                           {
                              S_temp_diag_j[jA] = jj;
                              ++jS_diag;
                           }
                        }
                        else
                        {
                           S_temp_diag_j[jA] = -1;
                        }
                     }
                     for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
                     {
                        jj = A_offd_j[jA];
                        if (CF_marker_offd[jj] == SMRK)
                        {
                           if (A_offd_data[jA] >= strength_threshold * row_scale)
                           {
                              S_temp_offd_j[jA] = -1;
                           }
                           else
                           {
                              S_temp_offd_j[jA] = jj;
                              ++jS_offd;
                           }
                        }
                        else
                        {
                           S_temp_offd_j[jA] = -1;
                        }
                     }
                  } /* diag >= 0 */
               } /* num_functions <= 1 */
            } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */
         } /* CF_marker == SMRK */
         else
         {
            /* Row is not tagged SMRK: it stays empty in S. */
            S_diag_i[i] = jS_diag;
            if (num_cols_offd)
            {
               S_offd_i[i] = jS_offd;
            }
            for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
            {
               S_temp_diag_j[jA] = -1;
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               S_temp_offd_j[jA] = -1;
            }
         } /* CF_marker != SMRK */
      } /* for each variable */

      /* Turn per-thread counts into global offsets; the grand totals land
         in S_diag_i[num_variables] / S_offd_i[num_variables]. */
      hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd,
                            S_offd_i + num_variables, prefix_sum_workspace);

      /*--------------------------------------------------------------
       * "Compress" the strength matrix.
       *
       * NOTE: S has *NO DIAGONAL ELEMENT* on any row.  Caveat Emptor!
       *
       * NOTE: This "compression" section of code may be removed, and
       * coarsening will still be done correctly.  However, the routine
       * that builds interpolation would have to be modified first.
       *----------------------------------------------------------------*/

      /* Second pass (per thread): shift row pointers by this thread's
         prefix-sum offsets and copy surviving entries into the compressed
         J arrays. */
      for (i = start; i < stop; i++)
      {
         S_diag_i[i] += jS_diag;
         S_offd_i[i] += jS_offd;

         jS = S_diag_i[i];
         for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
         {
            if (S_temp_diag_j[jA] > -1)
            {
               S_diag_j[jS] = S_temp_diag_j[jA];
               jS++;
            }
         }

         jS = S_offd_i[i];
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (S_temp_offd_j[jA] > -1)
            {
               S_offd_j[jS] = S_temp_offd_j[jA];
               jS++;
            }
         }
      } /* for each variable */
   } /* omp parallel */

   /* Finalize S and release all scratch storage. */
   hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables];
   hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables];
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   hypre_CSRMatrixJ(S_offd) = S_offd_j;
   hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;
   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr = S;

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/*==========================================================================*/
/*==========================================================================*/
/**
  Generates strength matrix

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
  \item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix". This is
  used in the coarsening and interpolation routines.
  \end{itemize}

  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is symmetric.
  This is because of the directional nature of the "strength of
  dependence" notion (see below). Since we are using nonsymmetric
  storage for A right now, this is not a problem. If we ever add the
  ability to store A symmetrically, then we could store the strength
  graph as floats instead of doubles to save space.

  \item This routine currently "compresses" the strength matrix. We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A. To do this, we could use the same A\_i
  and A\_j arrays, and would need only define the S\_data array. There
  are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $|a_{ij}| \geq \theta \max_{l \neq j} |a_{il}|$.

  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength". We also retain the "natural" graph
  notation for representing the directed graph of a matrix. That is,
  the nonzero entry $a_{ij}$ is represented as: i --> j. In the strength
  matrix, S, the entry $s_{ij}$ is also graphically denoted as above,
  and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_{ij}$
  \item $j$ "influences" $i$ with "strength" $s_{ij}$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.
  @param A [IN]
  coefficient matrix
  @param strength_threshold [IN]
  threshold parameter used to define strength
  @param max_row_sum [IN]
  parameter used to modify definition of strength for diagonal dominant matrices
  @param S_ptr [OUT]
  strength matrix

  @see */
/*--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSabs(hypre_ParCSRMatrix    *A,
                          HYPRE_Real             strength_threshold,
                          HYPRE_Real             max_row_sum,
                          HYPRE_Int              num_functions,
                          HYPRE_Int             *dof_func,
                          hypre_ParCSRMatrix   **S_ptr)
{
   MPI_Comm                comm            = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg        = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix        *A_diag          = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int              *A_diag_i        = hypre_CSRMatrixI(A_diag);
   HYPRE_Real             *A_diag_data     = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix        *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int              *A_offd_i        = hypre_CSRMatrixI(A_offd);
   HYPRE_Real             *A_offd_data     = NULL;
   HYPRE_Int              *A_diag_j        = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int              *A_offd_j        = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt           *row_starts      = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int               num_variables   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt            global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int               num_nonzeros_diag;
   HYPRE_Int               num_nonzeros_offd = 0;
   HYPRE_Int               num_cols_offd = 0;
   hypre_ParCSRMatrix     *S;
   hypre_CSRMatrix        *S_diag;
   HYPRE_Int              *S_diag_i;
   HYPRE_Int              *S_diag_j;
   /* HYPRE_Real          *S_diag_data; */
   hypre_CSRMatrix        *S_offd;
   HYPRE_Int              *S_offd_i = NULL;
   HYPRE_Int              *S_offd_j = NULL;
   /* HYPRE_Real          *S_offd_data; */
   HYPRE_Real              diag, row_scale, row_sum;
   HYPRE_Int               i, jA, jS;
   HYPRE_Int               ierr = 0;
   HYPRE_Int              *dof_func_offd;
   HYPRE_Int               num_sends;
   HYPRE_Int              *int_buf_data;
   HYPRE_Int               index, start, j;

   /*--------------------------------------------------------------
    * Compute a ParCSR strength matrix, S.
    *
    * Absolute-value variant: couplings are compared by magnitude, so
    * i depends on j if
    *     |aij| > strength_threshold * hypre_max (k != i) |aik|
    * regardless of the sign of the entries (contrast with
    * hypre_BoomerAMGCreateS, which distinguishes aii < 0 / aii >= 0).
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];

   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
   /* row_starts is owned by A, col_starts = row_starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S,0);
   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);

   S_diag_i = hypre_CSRMatrixI(S_diag);
   S_diag_j = hypre_CSRMatrixJ(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);

   hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;

   dof_func_offd = NULL;

   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      S_offd_j = hypre_CSRMatrixJ(S_offd);
      hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   }

   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   /* give S same nonzero structure as A */
   hypre_ParCSRMatrixCopy(A,S,0);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_variables; i++)
   {
      diag = A_diag_data[A_diag_i[i]];

      /* compute scaling factor and row sum (all in absolute value) */
      row_scale = 0.0;
      row_sum = fabs(diag);
      if (num_functions > 1)
      {
         for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
         {
            if (dof_func[i] == dof_func[A_diag_j[jA]])
            {
               row_scale = hypre_max(row_scale, fabs(A_diag_data[jA]));
               row_sum += fabs(A_diag_data[jA]);
            }
         }
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
            {
               row_scale = hypre_max(row_scale, fabs(A_offd_data[jA]));
               row_sum += fabs(A_offd_data[jA]);
            }
         }
      }
      else
      {
         for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
         {
            row_scale = hypre_max(row_scale, fabs(A_diag_data[jA]));
            row_sum += fabs(A_diag_data[jA]);
         }
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            row_scale = hypre_max(row_scale, fabs(A_offd_data[jA]));
            row_sum += fabs(A_offd_data[jA]);
         }
      }

      /* compute row entries of S */
      S_diag_j[A_diag_i[i]] = -1; /* reject diag entry */
      if ( fabs(row_sum) < fabs(diag)*(2.0-max_row_sum) && max_row_sum < 1.0 )
      {
         /* make all dependencies weak */
         for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
         {
            S_diag_j[jA] = -1;
         }
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            S_offd_j[jA] = -1;
         }
      }
      else
      {
         if (num_functions > 1)
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]])
               {
                  S_diag_j[jA] = -1;
               }
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]])
               {
                  S_offd_j[jA] = -1;
               }
            }
         }
         else
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale)
               {
                  S_diag_j[jA] = -1;
               }
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale)
               {
                  S_offd_j[jA] = -1;
               }
            }
         }
      }
   }

   /*--------------------------------------------------------------
    * "Compress" the strength matrix.
    *
    * NOTE: S has *NO DIAGONAL ELEMENT* on any row.  Caveat Emptor!
    *
    * NOTE: This "compression" section of code may be removed, and
    * coarsening will still be done correctly.  However, the routine
    * that builds interpolation would have to be modified first.
    *----------------------------------------------------------------*/

   /* RDF: not sure if able to thread this loop */
   /* Compress the diag part: keep entries still > -1, rebuild row pointers. */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
      {
         if (S_diag_j[jA] > -1)
         {
            S_diag_j[jS] = S_diag_j[jA];
            jS++;
         }
      }
   }
   S_diag_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_diag) = jS;

   /* RDF: not sure if able to thread this loop */
   /* Compress the offd part the same way. */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
      {
         if (S_offd_j[jA] > -1)
         {
            S_offd_j[jS] = S_offd_j[jA];
            jS++;
         }
      }
   }
   S_offd_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_offd) = jS;
   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr = S;

   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);

   return (ierr);
}

/*--------------------------------------------------------------------------*/

/* Build a communication package for S by pruning A's comm package down to
 * the off-processor columns that actually appear in S; also returns the
 * map from S's offd columns back to A's offd columns (col_offd_S_to_A). */
HYPRE_Int
hypre_BoomerAMGCreateSCommPkg(hypre_ParCSRMatrix  *A,
                              hypre_ParCSRMatrix  *S,
                              HYPRE_Int          **col_offd_S_to_A_ptr)
{
   MPI_Comm                 comm            = hypre_ParCSRMatrixComm(A);
   hypre_MPI_Status        *status;
   hypre_MPI_Request       *requests;
   hypre_ParCSRCommPkg     *comm_pkg_A      = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommPkg     *comm_pkg_S;
   hypre_ParCSRCommHandle  *comm_handle;
   hypre_CSRMatrix         *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_BigInt            *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);
   hypre_CSRMatrix         *S_diag          = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix         *S_offd          = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int               *S_offd_i        = hypre_CSRMatrixI(S_offd);
   HYPRE_Int               *S_offd_j        = hypre_CSRMatrixJ(S_offd);
   HYPRE_BigInt            *col_map_offd_S  = hypre_ParCSRMatrixColMapOffd(S);

   HYPRE_Int               *recv_procs_A      = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int               *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int               *send_procs_A      = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int               *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
   HYPRE_Int               *recv_procs_S;
   HYPRE_Int               *recv_vec_starts_S;
   HYPRE_Int               *send_procs_S;
   HYPRE_Int               *send_map_starts_S;
   HYPRE_Int               *send_map_elmts_S = NULL;
   HYPRE_BigInt            *big_send_map_elmts_S = NULL;
   HYPRE_Int               *col_offd_S_to_A;
   HYPRE_Int               *S_marker;        /* -1 = offd column of A unused by S */
   HYPRE_Int               *send_change;
   HYPRE_Int               *recv_change;
   HYPRE_Int                num_variables   = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int                num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int                num_cols_offd_S;
   HYPRE_Int                i, j, jcol;
   HYPRE_Int                proc, cnt, proc_cnt, total_nz;
   HYPRE_BigInt             first_row;
   HYPRE_Int                ierr = 0;
   HYPRE_Int                num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int                num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int                num_sends_S;
   HYPRE_Int                num_recvs_S;
   HYPRE_Int                num_nonzeros;

   num_nonzeros = S_offd_i[num_variables];

   /* Mark which of A's off-processor columns are referenced by S. */
   S_marker = NULL;
   if (num_cols_offd_A)
      S_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);

   for (i=0; i < num_cols_offd_A; i++)
      S_marker[i] = -1;

   for (i=0; i < num_nonzeros; i++)
   {
      jcol = S_offd_j[i];
      S_marker[jcol] = 0;
   }

   /* Count surviving receive processors and number the kept columns. */
   proc = 0;
   proc_cnt = 0;
   cnt = 0;
   num_recvs_S = 0;
   for (i=0; i < num_recvs_A; i++)
   {
      for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
      {
         if (!S_marker[j])
         {
            S_marker[j] = cnt;
            cnt++;
            proc = 1;
         }
      }
      if (proc) {num_recvs_S++; proc = 0;}
   }

   num_cols_offd_S = cnt;
   recv_change = NULL;
   recv_procs_S = NULL;
   send_change = NULL;
   if (col_map_offd_S) hypre_TFree(col_map_offd_S, HYPRE_MEMORY_HOST);
   col_map_offd_S = NULL;
   col_offd_S_to_A = NULL;

   if (num_recvs_A) recv_change = hypre_CTAlloc(HYPRE_Int, num_recvs_A, HYPRE_MEMORY_HOST);
   if (num_sends_A) send_change = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST);
   if (num_recvs_S) recv_procs_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S, HYPRE_MEMORY_HOST);
   recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S+1, HYPRE_MEMORY_HOST);
   if (num_cols_offd_S)
   {
      col_map_offd_S = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST);
      col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST);
   }

   if (num_cols_offd_S < num_cols_offd_A)
   {
      /* Some columns were dropped: renumber S's offd indices and build the
         pruned column map / receive lists. */
      for (i=0; i < num_nonzeros; i++)
      {
         jcol = S_offd_j[i];
         S_offd_j[i] = S_marker[jcol];
      }

      proc = 0;
      proc_cnt = 0;
      cnt = 0;
      recv_vec_starts_S[0] = 0;
      for (i=0; i < num_recvs_A; i++)
      {
         for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
         {
            if (S_marker[j] != -1)
            {
               col_map_offd_S[cnt] = col_map_offd_A[j];
               col_offd_S_to_A[cnt++] = j;
               proc = 1;
            }
         }
         /* Number of entries dropped from this receive block. */
         recv_change[i] = j-cnt-recv_vec_starts_A[i]+recv_vec_starts_S[proc_cnt];
         if (proc)
         {
            recv_procs_S[proc_cnt++] = recv_procs_A[i];
            recv_vec_starts_S[proc_cnt] = cnt;
            proc = 0;
         }
      }
   }
   else
   {
      /* Nothing dropped: copy A's receive structure verbatim. */
      for (i=0; i < num_recvs_A; i++)
      {
         for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++)
         {
            col_map_offd_S[j] = col_map_offd_A[j];
            col_offd_S_to_A[j] = j;
         }
         recv_procs_S[i] = recv_procs_A[i];
         recv_vec_starts_S[i] = recv_vec_starts_A[i];
      }
      recv_vec_starts_S[num_recvs_A] = recv_vec_starts_A[num_recvs_A];
   }

   /* Tell each sending neighbor how many of its entries were dropped. */
   requests = hypre_CTAlloc(hypre_MPI_Request, num_sends_A+num_recvs_A, HYPRE_MEMORY_HOST);
   j=0;
   for (i=0; i < num_sends_A; i++)
      hypre_MPI_Irecv(&send_change[i],1,HYPRE_MPI_INT,send_procs_A[i], 0,comm,&requests[j++]);

   for (i=0; i < num_recvs_A; i++)
      hypre_MPI_Isend(&recv_change[i],1,HYPRE_MPI_INT,recv_procs_A[i], 0,comm,&requests[j++]);

   status = hypre_CTAlloc(hypre_MPI_Status, j, HYPRE_MEMORY_HOST);
   hypre_MPI_Waitall(j,requests,status);
   hypre_TFree(status, HYPRE_MEMORY_HOST);
   hypre_TFree(requests, HYPRE_MEMORY_HOST);

   /* Size the pruned send side. */
   num_sends_S = 0;
   total_nz = send_map_starts_A[num_sends_A];
   for (i=0; i < num_sends_A; i++)
   {
      if (send_change[i])
      {
         if ((send_map_starts_A[i+1]-send_map_starts_A[i]) > send_change[i])
            num_sends_S++;
      }
      else
         num_sends_S++;
      total_nz -= send_change[i];
   }

   send_procs_S = NULL;
   if (num_sends_S)
      send_procs_S = hypre_CTAlloc(HYPRE_Int, num_sends_S, HYPRE_MEMORY_HOST);
   send_map_starts_S = hypre_CTAlloc(HYPRE_Int, num_sends_S+1, HYPRE_MEMORY_HOST);
   send_map_elmts_S = NULL;
   if (total_nz)
   {
      send_map_elmts_S = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST);
      big_send_map_elmts_S = hypre_CTAlloc(HYPRE_BigInt, total_nz, HYPRE_MEMORY_HOST);
   }

   proc = 0;
   proc_cnt = 0;
   for (i=0; i < num_sends_A; i++)
   {
      cnt = send_map_starts_A[i+1]-send_map_starts_A[i]-send_change[i];
      if (cnt)
      {
         send_procs_S[proc_cnt++] = send_procs_A[i];
         send_map_starts_S[proc_cnt] = send_map_starts_S[proc_cnt-1]+cnt;
      }
   }

   /* Assemble S's communication package. */
   comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_S) = comm;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs_S;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S;
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S;
   hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends_S;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S;

   /* Job 22: exchange the kept global column indices to form the send map. */
   comm_handle = hypre_ParCSRCommHandleCreate(22, comm_pkg_S, col_map_offd_S, big_send_map_elmts_S);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* NOTE(review): when first_row == 0 the conversion loop below is skipped,
    * leaving send_map_elmts_S as the zero-initialized CTAlloc values rather
    * than copies of big_send_map_elmts_S — confirm against upstream hypre.
    * Also, big_send_map_elmts_S is not freed in this visible code path;
    * verify its ownership/lifetime. */
   first_row = hypre_ParCSRMatrixFirstRowIndex(A);
   if (first_row)
      for (i=0; i < send_map_starts_S[num_sends_S]; i++)
         send_map_elmts_S[i] = (HYPRE_Int)(big_send_map_elmts_S[i]-first_row);

   hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S;

   hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S;
   hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
   hypre_CSRMatrixNumCols(S_offd) = num_cols_offd_S;

   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(send_change, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_change, HYPRE_MEMORY_HOST);

   *col_offd_S_to_A_ptr = col_offd_S_to_A;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCreate2ndS : creates strength matrix on coarse points
 * for second coarsening pass in aggressive coarsening (S*S+2S)
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreate2ndS( hypre_ParCSRMatrix *S,
                           HYPRE_Int *CF_marker,
                           HYPRE_Int num_paths,
                           HYPRE_BigInt *coarse_row_starts,
                           hypre_ParCSRMatrix **C_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] -= hypre_MPI_Wtime();
#endif

   MPI_Comm                comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg
*comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_diag_S = hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); hypre_ParCSRMatrix *S2; HYPRE_BigInt *col_map_offd_C = NULL; hypre_CSRMatrix *C_diag; /*HYPRE_Int *C_diag_data = NULL;*/ HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j = NULL; hypre_CSRMatrix *C_offd; /*HYPRE_Int *C_offd_data=NULL;*/ HYPRE_Int *C_offd_i; HYPRE_Int *C_offd_j=NULL; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *S_ext_diag_i = NULL; HYPRE_Int *S_ext_diag_j = NULL; HYPRE_Int S_ext_diag_size = 0; HYPRE_Int *S_ext_offd_i = NULL; HYPRE_Int *S_ext_offd_j = NULL; HYPRE_Int S_ext_offd_size = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *S_marker = NULL; HYPRE_Int *S_marker_offd = NULL; //HYPRE_Int *temp = NULL; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *map_S_to_C = NULL; HYPRE_Int num_sends = 0; HYPRE_Int num_recvs = 0; HYPRE_Int *send_map_starts; HYPRE_Int *tmp_send_map_starts = NULL; HYPRE_Int *send_map_elmts; HYPRE_Int *recv_vec_starts; HYPRE_Int *tmp_recv_vec_starts = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_int_buf_data = NULL; HYPRE_BigInt *temp = NULL; HYPRE_Int i, j, k; HYPRE_Int i1, i2, i3; HYPRE_BigInt big_i1; HYPRE_Int jj1, jj2, jrow, j_cnt; /*HYPRE_Int cnt, cnt_offd, cnt_diag;*/ HYPRE_Int num_procs, my_id; HYPRE_Int index; /*HYPRE_Int value;*/ HYPRE_Int num_coarse; HYPRE_Int num_nonzeros; HYPRE_BigInt global_num_coarse; HYPRE_BigInt my_first_cpt, my_last_cpt; HYPRE_Int *S_int_i = NULL; HYPRE_BigInt *S_int_j = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_BigInt *S_ext_j = NULL; 
/*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_Int *num_coarse_prefix_sum; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); num_coarse_prefix_sum = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Extract S_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = coarse_row_starts[0]; my_last_cpt = coarse_row_starts[1]-1; if (my_id == (num_procs -1)) global_num_coarse = coarse_row_starts[1]; hypre_MPI_Bcast(&global_num_coarse, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = coarse_row_starts[my_id]; my_last_cpt = coarse_row_starts[my_id+1]-1; global_num_coarse = coarse_row_starts[num_procs]; #endif if (num_cols_offd_S) { CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); } HYPRE_Int *coarse_to_fine = NULL; if (num_cols_diag_S) { fine_to_coarse = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); coarse_to_fine = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); } /*HYPRE_Int num_coarse_prefix_sum[hypre_NumThreads() + 1];*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int num_coarse_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_diag_S); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) num_coarse_private++; } hypre_prefix_sum(&num_coarse_private, &num_coarse, num_coarse_prefix_sum); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = num_coarse_private; 
coarse_to_fine[num_coarse_private] = i; num_coarse_private++; } else { fine_to_coarse[i] = -1; } } } /* omp parallel */ if (num_procs > 1) { if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); HYPRE_Int begin = send_map_starts[0]; HYPRE_Int end = send_map_starts[num_sends]; big_int_buf_data = hypre_TAlloc(HYPRE_BigInt, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { big_int_buf_data[index - begin] = (HYPRE_BigInt)fine_to_coarse[send_map_elmts[index]] + my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); int_buf_data = hypre_TAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { int_buf_data[index - begin] = CF_marker[send_map_elmts[index]]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data, HYPRE_MEMORY_HOST); S_int_i = hypre_TAlloc(HYPRE_Int, end+1, HYPRE_MEMORY_HOST); S_ext_i = hypre_CTAlloc(HYPRE_Int, recv_vec_starts[num_recvs]+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * generate S_int_i through adding number of coarse row-elements of offd and diag * for corresponding rows. 
S_int_i[j+1] contains the number of coarse elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ S_int_i[0] = 0; num_nonzeros = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int index = 0; for (k = S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) index++; } for (k = S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) index++; } S_int_i[j - begin + 1] = index; num_nonzeros += S_int_i[j - begin + 1]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,&S_int_i[1],&S_ext_i[1]); if (num_nonzeros) S_int_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = 0; j_cnt = 0; for (i=0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { jrow = send_map_elmts[j]; for (k=S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) S_int_j[j_cnt++] = (HYPRE_BigInt)fine_to_coarse[S_diag_j[k]]+my_first_cpt; } for (k=S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) S_int_j[j_cnt++] = fine_to_coarse_offd[S_offd_j[k]]; } } tmp_send_map_starts[i+1] = j_cnt; } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; 
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange S_ext_i[j+1] contains the number of coarse elements * of a row j ! * evaluate S_ext_i and compute num_nonzeros for S_ext *--------------------------------------------------------------------------*/ for (i=0; i < recv_vec_starts[num_recvs]; i++) S_ext_i[i+1] += S_ext_i[i]; num_nonzeros = S_ext_i[recv_vec_starts[num_recvs]]; if (num_nonzeros) S_ext_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_recv_vec_starts[0] = 0; for (i=0; i < num_recvs; i++) tmp_recv_vec_starts[i+1] = S_ext_i[recv_vec_starts[i+1]]; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; comm_handle = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,S_int_j,S_ext_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(S_int_i, HYPRE_MEMORY_HOST); hypre_TFree(S_int_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_BigInt *S_big_offd_j = NULL; S_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_diag_i[0] = 0; S_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i[0] = 0; hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, S_ext_i[num_cols_offd_S] + num_cols_offd_S, 16*hypre_NumThreads()); #pragma omp parallel private(i,j, big_i1) { HYPRE_Int S_ext_offd_size_private = 0; HYPRE_Int 
S_ext_diag_size_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { hypre_UnorderedBigIntSetPut(&found_set, fine_to_coarse_offd[i]); } for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_ext_offd_size_private++; hypre_UnorderedBigIntSetPut(&found_set, big_i1); } else S_ext_diag_size_private++; } } hypre_prefix_sum_pair( &S_ext_diag_size_private, &S_ext_diag_size, &S_ext_offd_size_private, &S_ext_offd_size, prefix_sum_workspace); #pragma omp master { if (S_ext_diag_size) S_ext_diag_j = hypre_TAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { S_ext_offd_j = hypre_TAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); S_big_offd_j = hypre_TAlloc(HYPRE_BigInt, S_ext_offd_size, HYPRE_MEMORY_HOST); } } #pragma omp barrier for (i = i_begin; i < i_end; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_big_offd_j[S_ext_offd_size_private++] = big_i1; //S_ext_offd_j[S_ext_offd_size_private++] = big_i1; else S_ext_diag_j[S_ext_diag_size_private++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[i + 1] = S_ext_diag_size_private; S_ext_offd_i[i + 1] = S_ext_offd_size_private; } } // omp parallel temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, S_big_offd_j[i]); //S_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, S_ext_offd_j[i]); hypre_TFree(S_ext_j, 
HYPRE_MEMORY_HOST); hypre_TFree(S_big_offd_j, HYPRE_MEMORY_HOST); if (num_cols_offd_C) hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int cnt_offd, cnt_diag, cnt, value; S_ext_diag_size = 0; S_ext_offd_size = 0; for (i=0; i < num_cols_offd_S; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt) S_ext_offd_size++; else S_ext_diag_size++; } } S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); } cnt_offd = 0; cnt_diag = 0; cnt = 0; HYPRE_Int num_coarse_offd = 0; for (i=0; i < num_cols_offd_S; i++) { if (CF_marker_offd[i] > 0) num_coarse_offd++; for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_ext_j[cnt_offd++] = big_i1; else S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[++cnt] = cnt_diag; S_ext_offd_i[cnt] = cnt_offd; } hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); cnt = 0; if (S_ext_offd_size || num_coarse_offd) { temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_coarse_offd, HYPRE_MEMORY_HOST); for (i=0; i < S_ext_offd_size; i++) temp[i] = S_ext_j[i]; cnt = S_ext_offd_size; for (i=0; i < num_cols_offd_S; i++) if (CF_marker_offd[i] > 0) temp[cnt++] = fine_to_coarse_offd[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; if (S_ext_offd_size || 
num_coarse_offd) hypre_TFree(temp, HYPRE_MEMORY_HOST); for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_C, S_ext_j[i], num_cols_offd_C); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_S) { map_S_to_C = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); HYPRE_BigInt cnt = 0; for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { cnt = hypre_BigLowerBound(col_map_offd_C + cnt, col_map_offd_C + num_cols_offd_C, fine_to_coarse_offd[i]) - col_map_offd_C; map_S_to_C[i] = cnt++; } else map_S_to_C[i] = -1; } } /* omp parallel */ } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif } /* num_procs > 1 */ /*----------------------------------------------------------------------- * Allocate and initialize some stuff. 
*-----------------------------------------------------------------------*/ HYPRE_Int *S_marker_array = NULL, *S_marker_offd_array = NULL; if (num_coarse) S_marker_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); if (num_cols_offd_C) S_marker_offd_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); HYPRE_Int *C_temp_offd_j_array = NULL; HYPRE_Int *C_temp_diag_j_array = NULL; HYPRE_Int *C_temp_offd_data_array = NULL; HYPRE_Int *C_temp_diag_data_array = NULL; if (num_paths > 1) { C_temp_diag_j_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_diag_data_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_data_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); } C_diag_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of S *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i1,i2,i3,jj1,jj2,index) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Int i1_begin, i1_end; hypre_GetSimpleThreadPartition(&i1_begin, &i1_end, num_cols_diag_S); HYPRE_Int *C_temp_diag_j = NULL, *C_temp_offd_j = NULL; HYPRE_Int *C_temp_diag_data = NULL, *C_temp_offd_data = NULL; if (num_paths > 1) { C_temp_diag_j = C_temp_diag_j_array + num_coarse*my_thread_num; C_temp_offd_j = C_temp_offd_j_array + num_cols_offd_C*my_thread_num; C_temp_diag_data = C_temp_diag_data_array + num_coarse*my_thread_num; C_temp_offd_data = C_temp_offd_data_array + num_cols_offd_C*my_thread_num; } HYPRE_Int *S_marker = NULL, *S_marker_offd = NULL; if (num_coarse) S_marker = 
S_marker_array + num_coarse*my_thread_num; if (num_cols_offd_C) S_marker_offd = S_marker_offd_array + num_cols_offd_C*my_thread_num; for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } // These two counters are for before filtering by num_paths HYPRE_Int jj_count_diag = 0; HYPRE_Int jj_count_offd = 0; // These two counters are for after filtering by num_paths HYPRE_Int num_nonzeros_diag = 0; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int ic_begin = num_coarse_prefix_sum[my_thread_num]; HYPRE_Int ic_end = num_coarse_prefix_sum[my_thread_num + 1]; HYPRE_Int ic; if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if 
(S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - 
jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { ++num_nonzeros_diag; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { ++num_nonzeros_offd; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ hypre_prefix_sum_pair( &num_nonzeros_diag, &C_diag_i[num_coarse], &num_nonzeros_offd, &C_offd_i[num_coarse], prefix_sum_workspace); for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { if (C_diag_i[num_coarse]) { C_diag_j = hypre_TAlloc(HYPRE_Int, C_diag_i[num_coarse], HYPRE_MEMORY_HOST); } if 
(C_offd_i[num_coarse]) { C_offd_j = hypre_TAlloc(HYPRE_Int, C_offd_i[num_coarse], HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ic_begin; ic < ic_end - 1; ic++) { if (C_diag_i[ic+1] == C_diag_i[ic] && C_offd_i[ic+1] == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; } if (ic_begin < ic_end) { C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; HYPRE_Int next_C_diag_i = prefix_sum_workspace[2*(my_thread_num + 1)]; HYPRE_Int next_C_offd_i = prefix_sum_workspace[2*(my_thread_num + 1) + 1]; if (next_C_diag_i == C_diag_i[ic] && next_C_offd_i == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; } if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < 
S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = i3; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = i3; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { jj_count_diag = num_nonzeros_diag; jj_count_offd = num_nonzeros_offd; for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if 
(S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = i3; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = i3; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { C_diag_j[num_nonzeros_diag++] = C_temp_diag_j[jj1 - jj_row_begin_diag]; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { C_offd_j[num_nonzeros_offd++] = C_temp_offd_j[jj1 - jj_row_begin_offd]; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ } /* omp parallel */ S2 = hypre_ParCSRMatrixCreate(comm, global_num_coarse, global_num_coarse, coarse_row_starts, coarse_row_starts, num_cols_offd_C, C_diag_i[num_coarse], C_offd_i[num_coarse]); hypre_ParCSRMatrixOwnsRowStarts(S2) = 0; C_diag = hypre_ParCSRMatrixDiag(S2); hypre_CSRMatrixI(C_diag) = C_diag_i; if (C_diag_i[num_coarse]) hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = hypre_ParCSRMatrixOffd(S2); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(S2) = C_offd; if (num_cols_offd_C) { if (C_offd_i[num_coarse]) hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(S2) = col_map_offd_C; } /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(C_temp_diag_j_array, 
HYPRE_MEMORY_HOST);
   hypre_TFree(C_temp_diag_data_array, HYPRE_MEMORY_HOST);
   hypre_TFree(C_temp_offd_j_array, HYPRE_MEMORY_HOST);
   hypre_TFree(C_temp_offd_data_array, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_array, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_offd_array, HYPRE_MEMORY_HOST);
   /* NOTE(review): the file-scope S_marker/S_marker_offd appear to stay NULL
    * (the parallel region declared local shadows of the same names), so the
    * next two frees look like no-ops — confirm against the full function */
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_to_fine, HYPRE_MEMORY_HOST);
   if (S_ext_diag_size)
   {
      hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST);
   if (S_ext_offd_size)
   {
      hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_S)
   {
      hypre_TFree(map_S_to_C, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   }

   /* all of C was assembled on the host above */
   hypre_CSRMatrixMemoryLocation(C_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_HOST;

   /* hand the assembled matrix back to the caller */
   *C_ptr = S2;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] += hypre_MPI_Wtime();
#endif

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(num_coarse_prefix_sum, HYPRE_MEMORY_HOST);

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr. coarsening
 *--------------------------------------------------------------------------*/

/* Fold the second-pass (aggressive coarsening) decisions in new_CF_marker
 * back into CF_marker. new_CF_marker holds one entry per current C-point
 * (CF_marker[i] > 0), consumed in order. Always returns 0. */
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker)
{
   HYPRE_Int i, cnt;

   cnt = 0;
   for (i=0; i < num_var; i++)
   {
      if (CF_marker[i] > 0 )
      {
         /* ordinary C-points (value 1) take the second-pass verdict;
          * specially-marked C-points (value > 1) are reset to 1 and kept
          * coarse, but still consume their slot in new_CF_marker */
         if (CF_marker[i] == 1)
            CF_marker[i] = new_CF_marker[cnt++];
         else
         {
            CF_marker[i] = 1;
            cnt++;
         }
      }
   }

   return 0;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr.
coarsening,
 * but marks new F-points (previous C-points) as -2
 *--------------------------------------------------------------------------*/

/* Apply the aggressive-coarsening verdicts in new_CF_marker to the current
 * C-points of CF_marker. Unlike hypre_BoomerAMGCorrectCFMarker, a demoted
 * C-point is tagged -2 (new F-point) instead of taking the raw verdict.
 * new_CF_marker holds one entry per C-point, consumed in order.
 * Always returns 0. */
HYPRE_Int
hypre_BoomerAMGCorrectCFMarker2(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker)
{
   HYPRE_Int idx;
   HYPRE_Int coarse_cnt = 0;

   for (idx = 0; idx < num_var; idx++)
   {
      /* only current C-points have a slot in new_CF_marker */
      if (CF_marker[idx] <= 0)
      {
         continue;
      }
      /* verdict -1 demotes this C-point to a new F-point (-2);
       * anything else keeps it coarse */
      CF_marker[idx] = (new_CF_marker[coarse_cnt] == -1) ? -2 : 1;
      coarse_cnt++;
   }

   return 0;
}
ccv_bbf.c
/* BBF cascade classifier: feature evaluation, stage-classifier (de)serialization
 * and training helpers. */
#include "ccv.h"
#include "ccv_internal.h"
#include <sys/time.h>
#ifdef HAVE_GSL
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#endif
#ifdef USE_OPENMP
#include <omp.h>
#endif

const ccv_bbf_param_t ccv_bbf_default_params = {
	.interval = 5,
	.min_neighbors = 2,
	.accurate = 1,
	.flags = 0,
	.size = {
		24,
		24,
	},
};

/* round a row width up to the next multiple of 4 bytes (row stride padding) */
#define _ccv_width_padding(x) (((x) + 3) & -4)

/* Evaluate one BBF feature.  u8 holds base pointers for the image scales
 * indexed by pz[]/nz[] (three entries at the call sites in this file), with
 * per-scale row strides in step[].  Returns 1 iff the minimum pixel over the
 * feature's positive points is strictly greater than the maximum pixel over
 * its negative points; bails out with 0 as soon as that becomes impossible. */
static inline int _ccv_run_bbf_feature(ccv_bbf_feature_t* feature, int* step, unsigned char** u8)
{
#define pf_at(i) (*(u8[feature->pz[i]] + feature->px[i] + feature->py[i] * step[feature->pz[i]]))
#define nf_at(i) (*(u8[feature->nz[i]] + feature->nx[i] + feature->ny[i] * step[feature->nz[i]]))
	unsigned char pmin = pf_at(0), nmax = nf_at(0);
	/* check if every point in P > every point in N, and take a shortcut */
	if (pmin <= nmax)
		return 0;
	int i;
	for (i = 1; i < feature->size; i++)
	{
		if (feature->pz[i] >= 0)
		{
			int p = pf_at(i);
			if (p < pmin)
			{
				/* new positive minimum; fail fast if it no longer beats nmax */
				if (p <= nmax)
					return 0;
				pmin = p;
			}
		}
		if (feature->nz[i] >= 0)
		{
			int n = nf_at(i);
			if (n > nmax)
			{
				/* new negative maximum; fail fast if it reaches pmin */
				if (pmin <= n)
					return 0;
				nmax = n;
			}
		}
	}
#undef pf_at
#undef nf_at
	return 1;
}

/* Read a stage classifier from a text file written by
 * _ccv_write_bbf_stage_classifier.  Floats are stored as their IEEE bit
 * patterns via int fields (hence the unions).  Returns 0 on success, -1 if
 * the file cannot be opened.  NOTE(review): fscanf results are discarded, so
 * a truncated/corrupt file yields uninitialized fields — verify inputs. */
static int _ccv_read_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* r = fopen(file, "r");
	if (r == 0) return -1;
	(void)fscanf(r, "%d", &classifier->count);
	union { float fl; int i; } fli;
	(void)fscanf(r, "%d", &fli.i);
	classifier->threshold = fli.fl;
	classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
	classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
	int i, j;
	for (i = 0; i < classifier->count; i++)
	{
		(void)fscanf(r, "%d", &classifier->feature[i].size);
		for (j = 0; j < classifier->feature[i].size; j++)
		{
			(void)fscanf(r, "%d %d %d", &classifier->feature[i].px[j], &classifier->feature[i].py[j], &classifier->feature[i].pz[j]);
			(void)fscanf(r, "%d %d %d", &classifier->feature[i].nx[j], &classifier->feature[i].ny[j], &classifier->feature[i].nz[j]);
		}
		union { float fl; int i; } flia,
flib;
		(void)fscanf(r, "%d %d", &flia.i, &flib.i);
		/* alpha[2k] / alpha[2k+1]: weights applied when feature k fires 0 / 1 */
		classifier->alpha[i * 2] = flia.fl;
		classifier->alpha[i * 2 + 1] = flib.fl;
	}
	fclose(r);
	return 0;
}

#ifdef HAVE_GSL

/* wall-clock timestamp in microseconds (used only for progress reporting) */
static unsigned int _ccv_bbf_time_measure()
{
	struct timeval tv;
	gettimeofday(&tv, 0);
	return tv.tv_sec * 1000000 + tv.tv_usec;
}

#define less_than(a, b, aux) ((a) < (b))
CCV_IMPLEMENT_QSORT(_ccv_sort_32f, float, less_than)
#undef less_than

/* Score every positive and negative sample against one stage classifier.
 * Each sample is a packed 3-scale image pyramid (full, 1/2, 1/4 resolution,
 * rows padded to 4 bytes); peval/neval receive the summed alpha responses.
 * Either (posdata, peval) or (negdata, neval) may be (0, 0) with count 0. */
static void _ccv_bbf_eval_data(ccv_bbf_stage_classifier_t* classifier, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, float* peval, float* neval)
{
	int i, j;
	/* per-scale row strides, then byte offsets of scales 1 and 2 in the blob */
	int steps[] = { _ccv_width_padding(size.width), _ccv_width_padding(size.width >> 1), _ccv_width_padding(size.width >> 2) };
	int isizs0 = steps[0] * size.height;
	int isizs01 = isizs0 + steps[1] * (size.height >> 1);
	for (i = 0; i < posnum; i++)
	{
		unsigned char* u8[] = { posdata[i], posdata[i] + isizs0, posdata[i] + isizs01 };
		float sum = 0;
		float* alpha = classifier->alpha;
		ccv_bbf_feature_t* feature = classifier->feature;
		for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature)
			sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
		peval[i] = sum;
	}
	for (i = 0; i < negnum; i++)
	{
		unsigned char* u8[] = { negdata[i], negdata[i] + isizs0, negdata[i] + isizs01 };
		float sum = 0;
		float* alpha = classifier->alpha;
		ccv_bbf_feature_t* feature = classifier->feature;
		for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature)
			sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
		neval[i] = sum;
	}
}

/* Drop positive samples rejected by any existing cascade stage, compacting
 * posdata in place (rejected samples are freed).  Returns the surviving
 * count. */
static int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size)
{
	float* peval = (float*)ccmalloc(posnum * sizeof(float));
	int i, j, k, rpos = posnum;
	for (i = 0; i < cascade->count; i++)
	{
		_ccv_bbf_eval_data(cascade->stage_classifier + i, posdata, rpos, 0, 0, size, peval, 0);
		k = 0;
		for (j = 0; j < rpos; j++)
			if (peval[j] >= cascade->stage_classifier[i].threshold)
			{
				posdata[k] = posdata[j];
				++k;
			}
			else
			{
ccfree(posdata[j]);
			}
		rpos = k;
	}
	ccfree(peval);
	return rpos;
}

/* Mine hard-negative training samples: run the current cascade over the
 * background images and keep false detections (windows that pass every
 * stage) as negatives, up to negnum.  Each kept sample is stored as the
 * packed 3-scale pyramid layout described above.  Returns the number of
 * negatives actually collected. */
static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum)
{
	int t, i, j, k, q;
	int negperbg;
	int negtotal = 0;
	int steps[] = { _ccv_width_padding(cascade->size.width), _ccv_width_padding(cascade->size.width >> 1), _ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs1 = steps[1] * (cascade->size.height >> 1);
	int isizs2 = steps[2] * (cascade->size.height >> 2);
	int* idcheck = (int*)ccmalloc(negnum * sizeof(int));
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	/* NOTE(review): seeding from a heap address gives a different, effectively
	 * arbitrary sequence per run/ASLR layout — presumably intentional
	 * "cheap entropy", but it makes runs non-reproducible; confirm. */
	gsl_rng_set(rng, (unsigned long int)idcheck);
	ccv_size_t imgsz = cascade->size;
	int rneg = negtotal;
	for (t = 0; negtotal < negnum; t++)
	{
		PRINT(CCV_CLI_INFO, "preparing negative data ... 0%%");
		for (i = 0; i < bgnum; i++)
		{
			/* early rounds spread the quota over remaining files; later rounds grab all */
			negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal;
			ccv_dense_matrix_t* image = 0;
			ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
			/* NOTE(review): this assert dereferences image BEFORE the NULL check
			 * below — if ccv_read fails and leaves image == 0, debug builds crash
			 * here instead of reaching the "file corrupted" path.  The NULL check
			 * should precede the assert. */
			assert((image->type & CCV_C1) && (image->type & CCV_8U));
			if (image == 0)
			{
				PRINT(CCV_CLI_ERROR, "\n%s file corrupted\n", bgfiles[i]);
				continue;
			}
			/* alternate horizontal/vertical flips across rounds for sample variety */
			if (t % 2 != 0)
				ccv_flip(image, 0, 0, CCV_FLIP_X);
			if (t % 4 >= 2)
				ccv_flip(image, 0, 0, CCV_FLIP_Y);
			ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size };
			ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params);
			memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int));
			for (j = 0; j < ccv_min(detected->rnum, negperbg); j++)
			{
				/* pick a random detection not chosen before and fully inside the image */
				int r = gsl_rng_uniform_int(rng, detected->rnum);
				int flag = 1;
				ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r);
				while (flag)
				{
					flag = 0;
					for (k = 0; k < j; k++)
						if (r == idcheck[k])
						{
							flag = 1;
							r = gsl_rng_uniform_int(rng, detected->rnum);
							break;
						}
					rect = (ccv_rect_t*)ccv_array_get(detected, r);
					if ((rect->x < 0) || (rect->y < 0) ||
(rect->width + rect->x > image->cols) || (rect->height + rect->y > image->rows)) { flag = 1; r = gsl_rng_uniform_int(rng, detected->rnum); } } idcheck[j] = r; ccv_dense_matrix_t* temp = 0; ccv_dense_matrix_t* imgs0 = 0; ccv_dense_matrix_t* imgs1 = 0; ccv_dense_matrix_t* imgs2 = 0; ccv_slice(image, (ccv_matrix_t**)&temp, 0, rect->y, rect->x, rect->height, rect->width); ccv_resample(temp, &imgs0, 0, imgsz.height, imgsz.width, CCV_INTER_AREA); assert(imgs0->step == steps[0]); ccv_matrix_free(temp); ccv_sample_down(imgs0, &imgs1, 0, 0, 0); assert(imgs1->step == steps[1]); ccv_sample_down(imgs1, &imgs2, 0, 0, 0); assert(imgs2->step == steps[2]); negdata[negtotal] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2); unsigned char* u8s0 = negdata[negtotal]; unsigned char* u8s1 = negdata[negtotal] + isizs0; unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1; unsigned char* u8[] = { u8s0, u8s1, u8s2 }; memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step); ccv_matrix_free(imgs0); memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step); ccv_matrix_free(imgs1); memcpy(u8s2, imgs2->data.u8, imgs2->rows * imgs2->step); ccv_matrix_free(imgs2); flag = 1; ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier; for (k = 0; k < cascade->count; ++k, ++classifier) { float sum = 0; float* alpha = classifier->alpha; ccv_bbf_feature_t* feature = classifier->feature; for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature) sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)]; if (sum < classifier->threshold) { flag = 0; break; } } if (!flag) ccfree(negdata[negtotal]); else { ++negtotal; if (negtotal >= negnum) break; } } ccv_array_free(detected); ccv_matrix_free(image); ccv_drain_cache(); PRINT(CCV_CLI_INFO, "\rpreparing negative data ... 
%2d%%", 100 * negtotal / negnum); fflush(0); if (negtotal >= negnum) break; } if (rneg == negtotal) break; rneg = negtotal; PRINT(CCV_CLI_INFO, "\nentering additional round %d\n", t + 1); } gsl_rng_free(rng); ccfree(idcheck); ccv_drain_cache(); PRINT(CCV_CLI_INFO, "\n"); return negtotal; } static void _ccv_prepare_positive_data(ccv_dense_matrix_t** posimg, unsigned char** posdata, ccv_size_t size, int posnum) { PRINT(CCV_CLI_INFO, "preparing positive data ... 0%%"); int i; for (i = 0; i < posnum; i++) { ccv_dense_matrix_t* imgs0 = posimg[i]; ccv_dense_matrix_t* imgs1 = 0; ccv_dense_matrix_t* imgs2 = 0; assert((imgs0->type & CCV_C1) && (imgs0->type & CCV_8U) && imgs0->rows == size.height && imgs0->cols == size.width); ccv_sample_down(imgs0, &imgs1, 0, 0, 0); ccv_sample_down(imgs1, &imgs2, 0, 0, 0); int isizs0 = imgs0->rows * imgs0->step; int isizs1 = imgs1->rows * imgs1->step; int isizs2 = imgs2->rows * imgs2->step; posdata[i] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2); memcpy(posdata[i], imgs0->data.u8, isizs0); memcpy(posdata[i] + isizs0, imgs1->data.u8, isizs1); memcpy(posdata[i] + isizs0 + isizs1, imgs2->data.u8, isizs2); PRINT(CCV_CLI_INFO, "\rpreparing positive data ... 
%2d%%", 100 * (i + 1) / posnum); fflush(0); ccv_matrix_free(imgs1); ccv_matrix_free(imgs2); } ccv_drain_cache(); PRINT(CCV_CLI_INFO, "\n"); } typedef struct { double fitness; int pk, nk; int age; double error; ccv_bbf_feature_t feature; } ccv_bbf_gene_t; static inline void _ccv_bbf_genetic_fitness(ccv_bbf_gene_t* gene) { gene->fitness = (1 - gene->error) * exp(-0.01 * gene->age) * exp((gene->pk + gene->nk) * log(1.015)); } static inline int _ccv_bbf_exist_gene_feature(ccv_bbf_gene_t* gene, int x, int y, int z) { int i; for (i = 0; i < gene->pk; i++) if (z == gene->feature.pz[i] && x == gene->feature.px[i] && y == gene->feature.py[i]) return 1; for (i = 0; i < gene->nk; i++) if (z == gene->feature.nz[i] && x == gene->feature.nx[i] && y == gene->feature.ny[i]) return 1; return 0; } static inline void _ccv_bbf_randomize_gene(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols) { int i; do { gene->pk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1; gene->nk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1; } while (gene->pk + gene->nk < CCV_BBF_POINT_MIN); /* a hard restriction of at least 3 points have to be examed */ gene->feature.size = ccv_max(gene->pk, gene->nk); gene->age = 0; for (i = 0; i < CCV_BBF_POINT_MAX; i++) { gene->feature.pz[i] = -1; gene->feature.nz[i] = -1; } int x, y, z; for (i = 0; i < gene->pk; i++) { do { z = gsl_rng_uniform_int(rng, 3); x = gsl_rng_uniform_int(rng, cols[z]); y = gsl_rng_uniform_int(rng, rows[z]); } while (_ccv_bbf_exist_gene_feature(gene, x, y, z)); gene->feature.pz[i] = z; gene->feature.px[i] = x; gene->feature.py[i] = y; } for (i = 0; i < gene->nk; i++) { do { z = gsl_rng_uniform_int(rng, 3); x = gsl_rng_uniform_int(rng, cols[z]); y = gsl_rng_uniform_int(rng, rows[z]); } while ( _ccv_bbf_exist_gene_feature(gene, x, y, z)); gene->feature.nz[i] = z; gene->feature.nx[i] = x; gene->feature.ny[i] = y; } } static inline double _ccv_bbf_error_rate(ccv_bbf_feature_t* feature, unsigned char** posdata, int posnum, 
unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw) { int i; int steps[] = { _ccv_width_padding(size.width), _ccv_width_padding(size.width >> 1), _ccv_width_padding(size.width >> 2) }; int isizs0 = steps[0] * size.height; int isizs01 = isizs0 + steps[1] * (size.height >> 1); double error = 0; for (i = 0; i < posnum; i++) { unsigned char* u8[] = { posdata[i], posdata[i] + isizs0, posdata[i] + isizs01 }; if (!_ccv_run_bbf_feature(feature, steps, u8)) error += pw[i]; } for (i = 0; i < negnum; i++) { unsigned char* u8[] = { negdata[i], negdata[i] + isizs0, negdata[i] + isizs01 }; if ( _ccv_run_bbf_feature(feature, steps, u8)) error += nw[i]; } return error; } #define less_than(fit1, fit2, aux) ((fit1).fitness >= (fit2).fitness) static CCV_IMPLEMENT_QSORT(_ccv_bbf_genetic_qsort, ccv_bbf_gene_t, less_than) #undef less_than static ccv_bbf_feature_t _ccv_bbf_genetic_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, int ftnum, ccv_size_t size, double* pw, double* nw) { ccv_bbf_feature_t best; /* seed (random method) */ gsl_rng_env_setup(); gsl_rng* rng = gsl_rng_alloc(gsl_rng_default); union { unsigned long int li; double db; } dbli; dbli.db = pw[0] + nw[0]; gsl_rng_set(rng, dbli.li); int i, j; int pnum = ftnum * 100; assert(pnum > 0); ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc(pnum * sizeof(ccv_bbf_gene_t)); int rows[] = { size.height, size.height >> 1, size.height >> 2 }; int cols[] = { size.width, size.width >> 1, size.width >> 2 }; for (i = 0; i < pnum; i++) _ccv_bbf_randomize_gene(rng, &gene[i], rows, cols); unsigned int timer = _ccv_bbf_time_measure(); #ifdef USE_OPENMP #pragma omp parallel for private(i) schedule(dynamic) #endif for (i = 0; i < pnum; i++) gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw); timer = _ccv_bbf_time_measure() - timer; for (i = 0; i < pnum; i++) _ccv_bbf_genetic_fitness(&gene[i]); double best_err = 1; int rnum = ftnum * 39; 
/* number of randomize */ int mnum = ftnum * 40; /* number of mutation */ int hnum = ftnum * 20; /* number of hybrid */ /* iteration stop crit : best no change in 40 iterations */ int it = 0, t; for (t = 0 ; it < 40; ++it, ++t) { int min_id = 0; double min_err = gene[0].error; for (i = 1; i < pnum; i++) if (gene[i].error < min_err) { min_id = i; min_err = gene[i].error; } min_err = gene[min_id].error = _ccv_bbf_error_rate(&gene[min_id].feature, posdata, posnum, negdata, negnum, size, pw, nw); if (min_err < best_err) { best_err = min_err; memcpy(&best, &gene[min_id].feature, sizeof(best)); PRINT(CCV_CLI_INFO, "best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, best.size); for (i = 0; i < best.size; i++) PRINT(CCV_CLI_INFO, "(%d %d %d), ", best.px[i], best.py[i], best.pz[i]); PRINT(CCV_CLI_INFO, "\n|-negative point: "); for (i = 0; i < best.size; i++) PRINT(CCV_CLI_INFO, "(%d %d %d), ", best.nx[i], best.ny[i], best.nz[i]); PRINT(CCV_CLI_INFO, "\n"); it = 0; } PRINT(CCV_CLI_INFO, "minimum error achieved in round %d(%d) : %f with %d ms\n", t, it, min_err, timer / 1000); _ccv_bbf_genetic_qsort(gene, pnum, 0); for (i = 0; i < ftnum; i++) ++gene[i].age; for (i = ftnum; i < ftnum + mnum; i++) { int parent = gsl_rng_uniform_int(rng, ftnum); memcpy(gene + i, gene + parent, sizeof(ccv_bbf_gene_t)); /* three mutation strategy : 1. add, 2. remove, 3. 
refine */ int pnm, pn = gsl_rng_uniform_int(rng, 2); int* pnk[] = { &gene[i].pk, &gene[i].nk }; int* pnx[] = { gene[i].feature.px, gene[i].feature.nx }; int* pny[] = { gene[i].feature.py, gene[i].feature.ny }; int* pnz[] = { gene[i].feature.pz, gene[i].feature.nz }; int x, y, z; int victim, decay = 1; do { switch (gsl_rng_uniform_int(rng, 3)) { case 0: /* add */ if (gene[i].pk == CCV_BBF_POINT_MAX && gene[i].nk == CCV_BBF_POINT_MAX) break; while (*pnk[pn] + 1 > CCV_BBF_POINT_MAX) pn = gsl_rng_uniform_int(rng, 2); do { z = gsl_rng_uniform_int(rng, 3); x = gsl_rng_uniform_int(rng, cols[z]); y = gsl_rng_uniform_int(rng, rows[z]); } while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z)); pnz[pn][*pnk[pn]] = z; pnx[pn][*pnk[pn]] = x; pny[pn][*pnk[pn]] = y; ++(*pnk[pn]); gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk); decay = gene[i].age = 0; break; case 1: /* remove */ if (gene[i].pk + gene[i].nk <= CCV_BBF_POINT_MIN) /* at least 3 points have to be examed */ break; while (*pnk[pn] - 1 <= 0) // || *pnk[pn] + *pnk[!pn] - 1 < CCV_BBF_POINT_MIN) pn = gsl_rng_uniform_int(rng, 2); victim = gsl_rng_uniform_int(rng, *pnk[pn]); for (j = victim; j < *pnk[pn] - 1; j++) { pnz[pn][j] = pnz[pn][j + 1]; pnx[pn][j] = pnx[pn][j + 1]; pny[pn][j] = pny[pn][j + 1]; } pnz[pn][*pnk[pn] - 1] = -1; --(*pnk[pn]); gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk); decay = gene[i].age = 0; break; case 2: /* refine */ pnm = gsl_rng_uniform_int(rng, *pnk[pn]); do { z = gsl_rng_uniform_int(rng, 3); x = gsl_rng_uniform_int(rng, cols[z]); y = gsl_rng_uniform_int(rng, rows[z]); } while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z)); pnz[pn][pnm] = z; pnx[pn][pnm] = x; pny[pn][pnm] = y; decay = gene[i].age = 0; break; } } while (decay); } for (i = ftnum + mnum; i < ftnum + mnum + hnum; i++) { /* hybrid strategy: taking positive points from dad, negative points from mum */ int dad, mum; do { dad = gsl_rng_uniform_int(rng, ftnum); mum = gsl_rng_uniform_int(rng, ftnum); } while (dad == mum 
|| gene[dad].pk + gene[mum].nk < CCV_BBF_POINT_MIN); /* at least 3 points have to be examed */ for (j = 0; j < CCV_BBF_POINT_MAX; j++) { gene[i].feature.pz[j] = -1; gene[i].feature.nz[j] = -1; } gene[i].pk = gene[dad].pk; for (j = 0; j < gene[i].pk; j++) { gene[i].feature.pz[j] = gene[dad].feature.pz[j]; gene[i].feature.px[j] = gene[dad].feature.px[j]; gene[i].feature.py[j] = gene[dad].feature.py[j]; } gene[i].nk = gene[mum].nk; for (j = 0; j < gene[i].nk; j++) { gene[i].feature.nz[j] = gene[mum].feature.nz[j]; gene[i].feature.nx[j] = gene[mum].feature.nx[j]; gene[i].feature.ny[j] = gene[mum].feature.ny[j]; } gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk); gene[i].age = 0; } for (i = ftnum + mnum + hnum; i < ftnum + mnum + hnum + rnum; i++) _ccv_bbf_randomize_gene(rng, &gene[i], rows, cols); timer = _ccv_bbf_time_measure(); #ifdef USE_OPENMP #pragma omp parallel for private(i) schedule(dynamic) #endif for (i = 0; i < pnum; i++) gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw); timer = _ccv_bbf_time_measure() - timer; for (i = 0; i < pnum; i++) _ccv_bbf_genetic_fitness(&gene[i]); } ccfree(gene); gsl_rng_free(rng); return best; } #define less_than(fit1, fit2, aux) ((fit1).error < (fit2).error) static CCV_IMPLEMENT_QSORT(_ccv_bbf_best_qsort, ccv_bbf_gene_t, less_than) #undef less_than static ccv_bbf_gene_t _ccv_bbf_best_gene(ccv_bbf_gene_t* gene, int pnum, int point_min, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw) { int i = 0; unsigned int timer = _ccv_bbf_time_measure(); #ifdef USE_OPENMP #pragma omp parallel for private(i) schedule(dynamic) #endif for (i = 0; i < pnum; i++) gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw); timer = _ccv_bbf_time_measure() - timer; _ccv_bbf_best_qsort(gene, pnum, 0); int min_id = 0; double min_err = gene[0].error; for (i = 0; i < pnum; i++) if 
(gene[i].nk + gene[i].pk >= point_min) { min_id = i; min_err = gene[i].error; break; } PRINT(CCV_CLI_INFO, "local best bbf feature with error %f\n|-size: %d\n|-positive point: ", min_err, gene[min_id].feature.size); for (i = 0; i < gene[min_id].feature.size; i++) PRINT(CCV_CLI_INFO, "(%d %d %d), ", gene[min_id].feature.px[i], gene[min_id].feature.py[i], gene[min_id].feature.pz[i]); PRINT(CCV_CLI_INFO, "\n|-negative point: "); for (i = 0; i < gene[min_id].feature.size; i++) PRINT(CCV_CLI_INFO, "(%d %d %d), ", gene[min_id].feature.nx[i], gene[min_id].feature.ny[i], gene[min_id].feature.nz[i]); PRINT(CCV_CLI_INFO, "\nthe computation takes %d ms\n", timer / 1000); return gene[min_id]; } static ccv_bbf_feature_t _ccv_bbf_convex_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_bbf_feature_t* best_feature, ccv_size_t size, double* pw, double* nw) { ccv_bbf_gene_t best_gene; /* seed (random method) */ gsl_rng_env_setup(); gsl_rng* rng = gsl_rng_alloc(gsl_rng_default); union { unsigned long int li; double db; } dbli; dbli.db = pw[0] + nw[0]; gsl_rng_set(rng, dbli.li); int i, j, k, q, p, g, t; int rows[] = { size.height, size.height >> 1, size.height >> 2 }; int cols[] = { size.width, size.width >> 1, size.width >> 2 }; int pnum = rows[0] * cols[0] + rows[1] * cols[1] + rows[2] * cols[2]; ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc((pnum * (CCV_BBF_POINT_MAX * 2 + 1) * 2 + CCV_BBF_POINT_MAX * 2 + 1) * sizeof(ccv_bbf_gene_t)); if (best_feature == 0) { /* bootstrapping the best feature, start from two pixels, one for positive, one for negative * the bootstrapping process go like this: first, it will assign a random pixel as positive * and enumerate every possible pixel as negative, and pick the best one. 
Then, enumerate every * possible pixel as positive, and pick the best one, until it converges */ memset(&best_gene, 0, sizeof(ccv_bbf_gene_t)); for (i = 0; i < CCV_BBF_POINT_MAX; i++) best_gene.feature.pz[i] = best_gene.feature.nz[i] = -1; best_gene.pk = 1; best_gene.nk = 0; best_gene.feature.size = 1; best_gene.feature.pz[0] = gsl_rng_uniform_int(rng, 3); best_gene.feature.px[0] = gsl_rng_uniform_int(rng, cols[best_gene.feature.pz[0]]); best_gene.feature.py[0] = gsl_rng_uniform_int(rng, rows[best_gene.feature.pz[0]]); for (t = 0; ; ++t) { g = 0; if (t % 2 == 0) { for (i = 0; i < 3; i++) for (j = 0; j < cols[i]; j++) for (k = 0; k < rows[i]; k++) if (i != best_gene.feature.pz[0] || j != best_gene.feature.px[0] || k != best_gene.feature.py[0]) { gene[g] = best_gene; gene[g].pk = gene[g].nk = 1; gene[g].feature.nz[0] = i; gene[g].feature.nx[0] = j; gene[g].feature.ny[0] = k; g++; } } else { for (i = 0; i < 3; i++) for (j = 0; j < cols[i]; j++) for (k = 0; k < rows[i]; k++) if (i != best_gene.feature.nz[0] || j != best_gene.feature.nx[0] || k != best_gene.feature.ny[0]) { gene[g] = best_gene; gene[g].pk = gene[g].nk = 1; gene[g].feature.pz[0] = i; gene[g].feature.px[0] = j; gene[g].feature.py[0] = k; g++; } } PRINT(CCV_CLI_INFO, "bootstrapping round : %d\n", t); ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, 2, posdata, posnum, negdata, negnum, size, pw, nw); if (local_gene.error >= best_gene.error - 1e-10) break; best_gene = local_gene; } } else { best_gene.feature = *best_feature; best_gene.pk = best_gene.nk = best_gene.feature.size; for (i = 0; i < CCV_BBF_POINT_MAX; i++) if (best_feature->pz[i] == -1) { best_gene.pk = i; break; } for (i = 0; i < CCV_BBF_POINT_MAX; i++) if (best_feature->nz[i] == -1) { best_gene.nk = i; break; } } /* after bootstrapping, the float search technique will do the following permutations: * a). add a new point to positive or negative * b). remove a point from positive or negative * c). 
move an existing point in positive or negative to another position * the three rules applied exhaustively, no heuristic used. */ for (t = 0; ; ++t) { g = 0; for (i = 0; i < 3; i++) for (j = 0; j < cols[i]; j++) for (k = 0; k < rows[i]; k++) if (!_ccv_bbf_exist_gene_feature(&best_gene, j, k, i)) { /* add positive point */ if (best_gene.pk < CCV_BBF_POINT_MAX - 1) { gene[g] = best_gene; gene[g].feature.pz[gene[g].pk] = i; gene[g].feature.px[gene[g].pk] = j; gene[g].feature.py[gene[g].pk] = k; gene[g].pk++; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } /* add negative point */ if (best_gene.nk < CCV_BBF_POINT_MAX - 1) { gene[g] = best_gene; gene[g].feature.nz[gene[g].nk] = i; gene[g].feature.nx[gene[g].nk] = j; gene[g].feature.ny[gene[g].nk] = k; gene[g].nk++; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } /* refine positive point */ for (q = 0; q < best_gene.pk; q++) { gene[g] = best_gene; gene[g].feature.pz[q] = i; gene[g].feature.px[q] = j; gene[g].feature.py[q] = k; g++; } /* add positive point, remove negative point */ if (best_gene.pk < CCV_BBF_POINT_MAX - 1 && best_gene.nk > 1) { for (q = 0; q < best_gene.nk; q++) { gene[g] = best_gene; gene[g].feature.pz[gene[g].pk] = i; gene[g].feature.px[gene[g].pk] = j; gene[g].feature.py[gene[g].pk] = k; gene[g].pk++; for (p = q; p < best_gene.nk - 1; p++) { gene[g].feature.nz[p] = gene[g].feature.nz[p + 1]; gene[g].feature.nx[p] = gene[g].feature.nx[p + 1]; gene[g].feature.ny[p] = gene[g].feature.ny[p + 1]; } gene[g].feature.nz[gene[g].nk - 1] = -1; gene[g].nk--; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } } /* refine negative point */ for (q = 0; q < best_gene.nk; q++) { gene[g] = best_gene; gene[g].feature.nz[q] = i; gene[g].feature.nx[q] = j; gene[g].feature.ny[q] = k; g++; } /* add negative point, remove positive point */ if (best_gene.pk > 1 && best_gene.nk < CCV_BBF_POINT_MAX - 1) { for (q = 0; q < best_gene.pk; q++) { gene[g] = best_gene; 
gene[g].feature.nz[gene[g].nk] = i; gene[g].feature.nx[gene[g].nk] = j; gene[g].feature.ny[gene[g].nk] = k; gene[g].nk++; for (p = q; p < best_gene.pk - 1; p++) { gene[g].feature.pz[p] = gene[g].feature.pz[p + 1]; gene[g].feature.px[p] = gene[g].feature.px[p + 1]; gene[g].feature.py[p] = gene[g].feature.py[p + 1]; } gene[g].feature.pz[gene[g].pk - 1] = -1; gene[g].pk--; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } } } if (best_gene.pk > 1) for (q = 0; q < best_gene.pk; q++) { gene[g] = best_gene; for (i = q; i < best_gene.pk - 1; i++) { gene[g].feature.pz[i] = gene[g].feature.pz[i + 1]; gene[g].feature.px[i] = gene[g].feature.px[i + 1]; gene[g].feature.py[i] = gene[g].feature.py[i + 1]; } gene[g].feature.pz[gene[g].pk - 1] = -1; gene[g].pk--; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } if (best_gene.nk > 1) for (q = 0; q < best_gene.nk; q++) { gene[g] = best_gene; for (i = q; i < best_gene.nk - 1; i++) { gene[g].feature.nz[i] = gene[g].feature.nz[i + 1]; gene[g].feature.nx[i] = gene[g].feature.nx[i + 1]; gene[g].feature.ny[i] = gene[g].feature.ny[i + 1]; } gene[g].feature.nz[gene[g].nk - 1] = -1; gene[g].nk--; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } gene[g] = best_gene; g++; PRINT(CCV_CLI_INFO, "float search round : %d\n", t); ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, CCV_BBF_POINT_MIN, posdata, posnum, negdata, negnum, size, pw, nw); if (local_gene.error >= best_gene.error - 1e-10) break; best_gene = local_gene; } ccfree(gene); gsl_rng_free(rng); return best_gene.feature; } static int _ccv_write_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier) { FILE* w = fopen(file, "wb"); if (w == 0) return -1; fprintf(w, "%d\n", classifier->count); union { float fl; int i; } fli; fli.fl = classifier->threshold; fprintf(w, "%d\n", fli.i); int i, j; for (i = 0; i < classifier->count; i++) { fprintf(w, "%d\n", classifier->feature[i].size); for (j = 0; j < 
classifier->feature[i].size; j++) { fprintf(w, "%d %d %d\n", classifier->feature[i].px[j], classifier->feature[i].py[j], classifier->feature[i].pz[j]); fprintf(w, "%d %d %d\n", classifier->feature[i].nx[j], classifier->feature[i].ny[j], classifier->feature[i].nz[j]); } union { float fl; int i; } flia, flib; flia.fl = classifier->alpha[i * 2]; flib.fl = classifier->alpha[i * 2 + 1]; fprintf(w, "%d %d\n", flia.i, flib.i); } fclose(w); return 0; } static int _ccv_read_background_data(const char* file, unsigned char** negdata, int* negnum, ccv_size_t size) { FILE* r = fopen(file, "rb"); if (r == 0) return -1; (void)fread(negnum, sizeof(int), 1, r); int i; int isizs012 = _ccv_width_padding(size.width) * size.height + _ccv_width_padding(size.width >> 1) * (size.height >> 1) + _ccv_width_padding(size.width >> 2) * (size.height >> 2); for (i = 0; i < *negnum; i++) { negdata[i] = (unsigned char*)ccmalloc(isizs012); (void)fread(negdata[i], 1, isizs012, r); } fclose(r); return 0; } static int _ccv_write_background_data(const char* file, unsigned char** negdata, int negnum, ccv_size_t size) { FILE* w = fopen(file, "w"); if (w == 0) return -1; fwrite(&negnum, sizeof(int), 1, w); int i; int isizs012 = _ccv_width_padding(size.width) * size.height + _ccv_width_padding(size.width >> 1) * (size.height >> 1) + _ccv_width_padding(size.width >> 2) * (size.height >> 2); for (i = 0; i < negnum; i++) fwrite(negdata[i], 1, isizs012, w); fclose(w); return 0; } static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum) { FILE* r = fopen(file, "r"); if (r == 0) return -1; (void)fscanf(r, "%d %d %d", i, k, bg); int j; union { double db; int i[2]; } dbi; for (j = 0; j < posnum; j++) { (void)fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]); pw[j] = dbi.db; } for (j = 0; j < negnum; j++) { (void)fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]); nw[j] = dbi.db; } fclose(r); return 0; } static int 
_ccv_save_bbf_cacade_training_state(const char* file, int i, int k, int bg, double* pw, double* nw, int posnum, int negnum) { FILE* w = fopen(file, "w"); if (w == 0) return -1; fprintf(w, "%d %d %d\n", i, k, bg); int j; union { double db; int i[2]; } dbi; for (j = 0; j < posnum; ++j) { dbi.db = pw[j]; fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]); } fprintf(w, "\n"); for (j = 0; j < negnum; ++j) { dbi.db = nw[j]; fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]); } fprintf(w, "\n"); fclose(w); return 0; } void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params) { int i, j, k; /* allocate memory for usage */ ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t)); cascade->count = 0; cascade->size = size; cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t)); unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*)); unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*)); double* pw = (double*)ccmalloc(posnum * sizeof(double)); double* nw = (double*)ccmalloc(negnum * sizeof(double)); float* peval = (float*)ccmalloc(posnum * sizeof(float)); float* neval = (float*)ccmalloc(negnum * sizeof(float)); double inv_balance_k = 1. 
/ params.balance_k; /* balance factor k, and weighted with 0.01 */ params.balance_k *= 0.01; inv_balance_k *= 0.01; int steps[] = { _ccv_width_padding(cascade->size.width), _ccv_width_padding(cascade->size.width >> 1), _ccv_width_padding(cascade->size.width >> 2) }; int isizs0 = steps[0] * cascade->size.height; int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1); i = 0; k = 0; int bg = 0; int cacheK = 10; /* state resume code */ char buf[1024]; sprintf(buf, "%s/stat.txt", dir); _ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum); if (i > 0) { cascade->count = i; ccfree(cascade->stage_classifier); cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t)); for (j = 0; j < i; j++) { sprintf(buf, "%s/stage-%d.txt", dir, j); _ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]); } } if (k > 0) cacheK = k; int rpos, rneg = 0; if (bg) { sprintf(buf, "%s/negs.txt", dir); _ccv_read_background_data(buf, negdata, &rneg, cascade->size); } for (; i < params.layer; i++) { if (!bg) { rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum); /* save state of background data */ sprintf(buf, "%s/negs.txt", dir); _ccv_write_background_data(buf, negdata, rneg, cascade->size); bg = 1; } double totalw; /* save state of cascade : level, weight etc. 
*/ sprintf(buf, "%s/stat.txt", dir); _ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum); ccv_bbf_stage_classifier_t classifier; if (k > 0) { /* resume state of classifier */ sprintf( buf, "%s/stage-%d.txt", dir, i ); _ccv_read_bbf_stage_classifier(buf, &classifier); } else { /* initialize classifier */ for (j = 0; j < posnum; j++) pw[j] = params.balance_k; for (j = 0; j < rneg; j++) nw[j] = inv_balance_k; classifier.count = k; classifier.threshold = 0; classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t)); classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float)); } _ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum); rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size); PRINT(CCV_CLI_INFO, "%d postivie data and %d negative data in training\n", rpos, rneg); /* reweight to 1.00 */ totalw = 0; for (j = 0; j < rpos; j++) totalw += pw[j]; for (j = 0; j < rneg; j++) totalw += nw[j]; for (j = 0; j < rpos; j++) pw[j] = pw[j] / totalw; for (j = 0; j < rneg; j++) nw[j] = nw[j] / totalw; for (; ; k++) { /* get overall true-positive, false-positive rate and threshold */ double tp = 0, fp = 0, etp = 0, efp = 0; _ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval); _ccv_sort_32f(peval, rpos, 0); classifier.threshold = peval[(int)((1. 
- params.pos_crit) * rpos)] - 1e-6; for (j = 0; j < rpos; j++) { if (peval[j] >= 0) ++tp; if (peval[j] >= classifier.threshold) ++etp; } tp /= rpos; etp /= rpos; for (j = 0; j < rneg; j++) { if (neval[j] >= 0) ++fp; if (neval[j] >= classifier.threshold) ++efp; } fp /= rneg; efp /= rneg; PRINT(CCV_CLI_INFO, "stage classifier real TP rate : %f, FP rate : %f\n", tp, fp); PRINT(CCV_CLI_INFO, "stage classifier TP rate : %f, FP rate : %f at threshold : %f\n", etp, efp, classifier.threshold); if (k > 0) { /* save classifier state */ sprintf(buf, "%s/stage-%d.txt", dir, i); _ccv_write_bbf_stage_classifier(buf, &classifier); sprintf(buf, "%s/stat.txt", dir); _ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum); } if (etp > params.pos_crit && efp < params.neg_crit) break; /* TODO: more post-process is needed in here */ /* select the best feature in current distribution through genetic algorithm optimization */ ccv_bbf_feature_t best; if (params.optimizer == CCV_BBF_GENETIC_OPT) { best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw); } else if (params.optimizer == CCV_BBF_FLOAT_OPT) { best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw); } else { best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw); best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw); } double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw); double rw = (1 - err) / err; totalw = 0; /* reweight */ for (j = 0; j < rpos; j++) { unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 }; if (!_ccv_run_bbf_feature(&best, steps, u8)) pw[j] *= rw; pw[j] *= params.balance_k; totalw += pw[j]; } for (j = 0; j < rneg; j++) { unsigned char* u8[] = { negdata[j], negdata[j] + isizs0, negdata[j] + isizs01 }; if (_ccv_run_bbf_feature(&best, steps, u8)) nw[j] *= 
rw; nw[j] *= inv_balance_k; totalw += nw[j]; } for (j = 0; j < rpos; j++) pw[j] = pw[j] / totalw; for (j = 0; j < rneg; j++) nw[j] = nw[j] / totalw; double c = log(rw); PRINT(CCV_CLI_INFO, "coefficient of feature %d: %f\n", k + 1, c); classifier.count = k + 1; /* resizing classifier */ if (k >= cacheK) { ccv_bbf_feature_t* feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * 2 * sizeof(ccv_bbf_feature_t)); memcpy(feature, classifier.feature, cacheK * sizeof(ccv_bbf_feature_t)); ccfree(classifier.feature); float* alpha = (float*)ccmalloc(cacheK * 4 * sizeof(float)); memcpy(alpha, classifier.alpha, cacheK * 2 * sizeof(float)); ccfree(classifier.alpha); classifier.feature = feature; classifier.alpha = alpha; cacheK *= 2; } /* setup new feature */ classifier.feature[k] = best; classifier.alpha[k * 2] = -c; classifier.alpha[k * 2 + 1] = c; } cascade->count = i + 1; ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t)); memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t)); ccfree(cascade->stage_classifier); stage_classifier[i] = classifier; cascade->stage_classifier = stage_classifier; k = 0; bg = 0; for (j = 0; j < rpos; j++) ccfree(posdata[j]); for (j = 0; j < rneg; j++) ccfree(negdata[j]); } ccfree(neval); ccfree(peval); ccfree(nw); ccfree(pw); ccfree(negdata); ccfree(posdata); ccfree(cascade); } #else void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params) { fprintf(stderr, " ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n"); } #endif static int _ccv_is_equal(const void* _r1, const void* _r2, void* data) { const ccv_comp_t* r1 = (const ccv_comp_t*)_r1; const ccv_comp_t* r2 = (const ccv_comp_t*)_r2; int distance = (int)(r1->rect.width * 0.25 + 0.5); return r2->rect.x <= r1->rect.x + distance 
&& r2->rect.x >= r1->rect.x - distance &&
	/* vertical center within +/- distance */
	r2->rect.y <= r1->rect.y + distance &&
	r2->rect.y >= r1->rect.y - distance &&
	/* widths within a factor of 1.5 of each other (both directions) */
	r2->rect.width <= (int)(r1->rect.width * 1.5 + 0.5) &&
	(int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}

/* Grouping predicate identical to _ccv_is_equal, but additionally requires the
 * two candidate rectangles to carry the same classification id, so detections
 * from different cascades are never merged together. */
static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	/* positional tolerance: 25% of the reference width, rounded */
	int distance = (int)(r1->rect.width * 0.25 + 0.5);
	return r2->classification.id == r1->classification.id &&
		r2->rect.x <= r1->rect.x + distance &&
		r2->rect.x >= r1->rect.x - distance &&
		r2->rect.y <= r1->rect.y + distance &&
		r2->rect.y >= r1->rect.y - distance &&
		r2->rect.width <= (int)(r1->rect.width * 1.5 + 0.5) &&
		(int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}

/* Multi-scale BBF sliding-window detector.
 * a        : input image
 * _cascade : array of `count` trained cascades to run
 * params   : detection parameters (interval, min_neighbors, flags, size, ...)
 * Returns a ccv_array_t of ccv_comp_t detections (caller frees). */
ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params)
{
	/* how many times the detection window fits the image in each dimension */
	int hr = a->rows / params.size.height;
	int wr = a->cols / params.size.width;
	/* per-octave scale step: 2^(1/(interval+1)) */
	double scale = pow(2., 1.
/ (params.interval + 1.));
	int next = params.interval + 1;
	/* number of pyramid levels until the window no longer fits */
	int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale));
	/* 4 slots per level: base image plus 3 half-pixel-shifted variants
	 * (only filled when params.accurate) */
	ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	/* resample the input if the requested window size differs from the
	 * first cascade's native window size */
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA);
	else
		pyr[0] = a;
	int i, j, k, t, x, y, q;
	/* intra-octave levels by resampling from the base image */
	for (i = 1; i < ccv_min(params.interval + 1, scale_upto + next * 2); i++)
		ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA);
	/* remaining octaves by repeated 2x downsampling of the level one octave up */
	for (i = next; i < scale_upto + next * 2; i++)
		ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0);
	if (params.accurate)
		/* accurate mode: also build (1,0), (0,1), (1,1) shifted downsamples */
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 1], 0, 1, 0);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1);
		}
	ccv_array_t* idx_seq;
	ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);       /* raw hits, per cascade */
	ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);      /* grouped hits */
	ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	/* detect in multi scale */
	for (t = 0; t < count; t++)
	{
		ccv_bbf_classifier_cascade_t* cascade = _cascade[t];
		/* map coordinates from cascade-native window back to requested size */
		float scale_x = (float) params.size.width / (float) cascade->size.width;
		float scale_y = (float) params.size.height / (float) cascade->size.height;
		ccv_array_clear(seq);
		for (i = 0; i < scale_upto; i++)
		{
			int dx[] = {0, 1, 0, 1};
			int dy[] = {0, 0, 1, 1};
			/* scan extent on the coarsest (quarter-resolution) level */
			int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2);
			int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step };
			int i_cols = pyr[i * 4 + next * 8]->cols -
(cascade->size.width >> 2);
			/* bytes left over at each row end for the three pyramid levels */
			int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4,
							   pyr[i * 4 + next * 4]->step * 2 - i_cols * 2,
							   pyr[i * 4 + next * 8]->step - i_cols };
			/* accurate mode scans all 4 sub-pixel phases, otherwise just one */
			for (q = 0; q < (params.accurate ? 4 : 1); q++)
			{
				/* cursors into full, half and quarter resolution images */
				unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2,
										pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step,
										pyr[i * 4 + next * 8 + q]->data.u8 };
				for (y = 0; y < i_rows; y++)
				{
					for (x = 0; x < i_cols; x++)
					{
						float sum;
						int flag = 1;
						ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
						/* run cascade stages; early-reject when a stage fails */
						for (j = 0; j < cascade->count; ++j, ++classifier)
						{
							sum = 0;
							float* alpha = classifier->alpha;
							ccv_bbf_feature_t* feature = classifier->feature;
							/* each feature votes alpha[0] or alpha[1] */
							for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature)
								sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
							if (sum < classifier->threshold)
							{
								flag = 0;
								break;
							}
						}
						if (flag)
						{
							ccv_comp_t comp;
							/* map back to input coordinates (x4 because scan is
							 * on the quarter-resolution level) */
							comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5));
							comp.neighbors = 1;
							comp.classification.id = t;
							comp.classification.confidence = sum;
							ccv_array_push(seq, &comp);
						}
						u8[0] += 4;
						u8[1] += 2;
						u8[2] += 1;
					}
					u8[0] += paddings[0];
					u8[1] += paddings[1];
					u8[2] += paddings[2];
				}
			}
			scale_x *= scale;
			scale_y *= scale;
		}
		/* the following code from OpenCV's haar feature implementation */
		if(params.min_neighbors == 0)
		{
			/* no grouping requested: pass all raw hits through */
			for (i = 0; i < seq->rnum; i++)
			{
				ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
				ccv_array_push(result_seq, comp);
			}
		} else {
			idx_seq = 0;
			ccv_array_clear(seq2);
			// group retrieved rectangles in order to filter out noise
			int ncomp = ccv_array_group(seq, &idx_seq, _ccv_is_equal_same_class, 0);
			ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
			memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
			// count number of neighbors
			for(i = 0; i < seq->rnum; i++)
			{
				ccv_comp_t r1 =
*(ccv_comp_t*)ccv_array_get(seq, i);
				int idx = *(int*)ccv_array_get(idx_seq, i);
				if (comps[idx].neighbors == 0)
					comps[idx].classification.confidence = r1.classification.confidence;
				++comps[idx].neighbors;
				/* accumulate rectangle coordinates; averaged below */
				comps[idx].rect.x += r1.rect.x;
				comps[idx].rect.y += r1.rect.y;
				comps[idx].rect.width += r1.rect.width;
				comps[idx].rect.height += r1.rect.height;
				comps[idx].classification.id = r1.classification.id;
				/* keep the strongest confidence seen in the group */
				comps[idx].classification.confidence = ccv_max(comps[idx].classification.confidence, r1.classification.confidence);
			}
			// calculate average bounding box
			for(i = 0; i < ncomp; i++)
			{
				int n = comps[i].neighbors;
				if(n >= params.min_neighbors)
				{
					ccv_comp_t comp;
					/* integer rounding average: (sum * 2 + n) / (2 * n) */
					comp.rect.x = (comps[i].rect.x * 2 + n) / (2 * n);
					comp.rect.y = (comps[i].rect.y * 2 + n) / (2 * n);
					comp.rect.width = (comps[i].rect.width * 2 + n) / (2 * n);
					comp.rect.height = (comps[i].rect.height * 2 + n) / (2 * n);
					comp.neighbors = comps[i].neighbors;
					comp.classification.id = comps[i].classification.id;
					comp.classification.confidence = comps[i].classification.confidence;
					ccv_array_push(seq2, &comp);
				}
			}
			// filter out small face rectangles inside large face rectangles
			for(i = 0; i < seq2->rnum; i++)
			{
				ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq2, i);
				int flag = 1;
				for(j = 0; j < seq2->rnum; j++)
				{
					ccv_comp_t r2 = *(ccv_comp_t*)ccv_array_get(seq2, j);
					int distance = (int)(r2.rect.width * 0.25 + 0.5);
					/* drop r1 when fully inside a same-class r2 with stronger
					 * neighbor support */
					if(i != j &&
					   r1.classification.id == r2.classification.id &&
					   r1.rect.x >= r2.rect.x - distance &&
					   r1.rect.y >= r2.rect.y - distance &&
					   r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance &&
					   r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance &&
					   (r2.neighbors > ccv_max(3, r1.neighbors) || r1.neighbors < 3))
					{
						flag = 0;
						break;
					}
				}
				if(flag)
					ccv_array_push(result_seq, &r1);
			}
			ccv_array_free(idx_seq);
			ccfree(comps);
		}
	}
	ccv_array_free(seq);
	ccv_array_free(seq2);
	ccv_array_t* result_seq2;
	/* the following code from OpenCV's haar feature implementation */
	if (params.flags &
CCV_BBF_NO_NESTED)
	{
		/* cross-class non-nesting pass: keep only one (the most confident)
		 * representative per group of overlapping rectangles */
		result_seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
		idx_seq = 0;
		// group retrieved rectangles in order to filter out noise
		int ncomp = ccv_array_group(result_seq, &idx_seq, _ccv_is_equal, 0);
		ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
		memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
		// count number of neighbors
		for(i = 0; i < result_seq->rnum; i++)
		{
			ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(result_seq, i);
			int idx = *(int*)ccv_array_get(idx_seq, i);
			/* remember the best-confidence member of each group */
			if (comps[idx].neighbors == 0 || comps[idx].classification.confidence < r1.classification.confidence)
			{
				comps[idx].classification.confidence = r1.classification.confidence;
				comps[idx].neighbors = 1;
				comps[idx].rect = r1.rect;
				comps[idx].classification.id = r1.classification.id;
			}
		}
		// calculate average bounding box
		for(i = 0; i < ncomp; i++)
			if(comps[i].neighbors)
				ccv_array_push(result_seq2, &comps[i]);
		ccv_array_free(result_seq);
		ccfree(comps);
	} else {
		result_seq2 = result_seq;
	}
	/* free the pyramid (pyr[0] only if we own a resampled copy) */
	for (i = 1; i < scale_upto + next * 2; i++)
		ccv_matrix_free(pyr[i * 4]);
	if (params.accurate)
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			ccv_matrix_free(pyr[i * 4 + 1]);
			ccv_matrix_free(pyr[i * 4 + 2]);
			ccv_matrix_free(pyr[i * 4 + 3]);
		}
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_matrix_free(pyr[0]);
	return result_seq2;
}

/* Load a cascade from `directory`: cascade.txt holds count and window size,
 * stage-%d.txt holds each stage classifier. Returns NULL when cascade.txt is
 * missing; on a partial read, count is truncated to the stages actually read. */
ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade(const char* directory)
{
	char buf[1024];
	sprintf(buf, "%s/cascade.txt", directory);
	int s, i;
	FILE* r = fopen(buf, "r");
	if (r == 0)
		return 0;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	s = fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height);
	assert(s > 0);
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	for (i = 0; i < cascade->count; i++)
	{
sprintf(buf, "%s/stage-%d.txt", directory, i);
		if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0)
		{
			/* stop at the first unreadable stage and shrink the cascade */
			cascade->count = i;
			break;
		}
	}
	fclose(r);
	return cascade;
}

/* Deserialize a cascade from an in-memory buffer `s`.
 * Layout: count, size.width, size.height, then per stage: count, threshold,
 * feature array, alpha array (2 floats per feature). No bounds checking:
 * the caller must supply a buffer produced by the matching writer. */
ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s)
{
	int i;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	memcpy(&cascade->count, s, sizeof(cascade->count));
	s += sizeof(cascade->count);
	memcpy(&cascade->size.width, s, sizeof(cascade->size.width));
	s += sizeof(cascade->size.width);
	memcpy(&cascade->size.height, s, sizeof(cascade->size.height));
	s += sizeof(cascade->size.height);
	ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	for (i = 0; i < cascade->count; i++, classifier++)
	{
		memcpy(&classifier->count, s, sizeof(classifier->count));
		s += sizeof(classifier->count);
		memcpy(&classifier->threshold, s, sizeof(classifier->threshold));
		s += sizeof(classifier->threshold);
		classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
		classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
		memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t));
		s += classifier->count * sizeof(ccv_bbf_feature_t);
		memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float));
		s += classifier->count * 2 * sizeof(float);
	}
	return cascade;
}

/* Serialize `cascade` into `s` (capacity `slen`). Always returns the number of
 * bytes required; writes only when slen is large enough, so callers may probe
 * with slen = 0 first. */
int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen)
{
	int i;
	/* first pass: compute the total serialized size */
	int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height);
	ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
	for (i = 0; i < cascade->count; i++, classifier++)
		len += sizeof(classifier->count) + sizeof(classifier->threshold) + classifier->count * sizeof(ccv_bbf_feature_t) + classifier->count * 2 * sizeof(float);
	if
(slen >= len)
	{
		/* second pass: emit header then each stage (count, threshold,
		 * features, alphas) in the same layout read_binary expects */
		memcpy(s, &cascade->count, sizeof(cascade->count));
		s += sizeof(cascade->count);
		memcpy(s, &cascade->size.width, sizeof(cascade->size.width));
		s += sizeof(cascade->size.width);
		memcpy(s, &cascade->size.height, sizeof(cascade->size.height));
		s += sizeof(cascade->size.height);
		classifier = cascade->stage_classifier;
		for (i = 0; i < cascade->count; i++, classifier++)
		{
			memcpy(s, &classifier->count, sizeof(classifier->count));
			s += sizeof(classifier->count);
			memcpy(s, &classifier->threshold, sizeof(classifier->threshold));
			s += sizeof(classifier->threshold);
			memcpy(s, classifier->feature, classifier->count * sizeof(ccv_bbf_feature_t));
			s += classifier->count * sizeof(ccv_bbf_feature_t);
			memcpy(s, classifier->alpha, classifier->count * 2 * sizeof(float));
			s += classifier->count * 2 * sizeof(float);
		}
	}
	return len;
}

/* Release a cascade and every per-stage feature/alpha allocation. */
void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade)
{
	int i;
	for (i = 0; i < cascade->count; ++i)
	{
		ccfree(cascade->stage_classifier[i].feature);
		ccfree(cascade->stage_classifier[i].alpha);
	}
	ccfree(cascade->stage_classifier);
	ccfree(cascade);
}
/* ==== par_csr_matrix.c ==== */
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Member functions for hypre_ParCSRMatrix class.
 *
 *****************************************************************************/

#include "_hypre_parcsr_mv.h"

#include "../seq_mv/HYPRE_seq_mv.h"
#include "../seq_mv/csr_matrix.h"

/* In addition to publically accessible interface in HYPRE_mv.h, the
   implementation in this file uses accessor macros into the sequential matrix
   structure, and so includes the .h that defines that structure. Should those
   accessor functions become proper functions at some later date, this will not
   be necessary. AJC 4/99 */

HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*);

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixCreate
 *--------------------------------------------------------------------------*/

/* If create is called and row_starts and col_starts are NOT null, then it is
   assumed that they are of length 2 containing the start row of the calling
   processor followed by the start row of the next processor - AHB 6/05 */

hypre_ParCSRMatrix*
hypre_ParCSRMatrixCreate( MPI_Comm comm,
                          HYPRE_BigInt global_num_rows,
                          HYPRE_BigInt global_num_cols,
                          HYPRE_BigInt *row_starts,
                          HYPRE_BigInt *col_starts,
                          HYPRE_Int num_cols_offd,
                          HYPRE_Int num_nonzeros_diag,
                          HYPRE_Int num_nonzeros_offd )
{
   hypre_ParCSRMatrix *matrix;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt first_row_index, first_col_diag;

   /* zero-initialized allocation of the wrapper struct */
   matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   /* generate partitionings when the caller did not supply them; a square
      matrix shares one partitioning array for rows and columns */
   if (!row_starts)
   {
      hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, &row_starts);
   }
   if (!col_starts)
   {
      if (global_num_rows == global_num_cols)
      {
         col_starts = row_starts;
      }
      else
      {
         hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, &col_starts);
      }
   }

   /* row_starts[0] is start of local rows.
      row_starts[1] is start of next processor's rows */
   first_row_index = row_starts[0];
   local_num_rows = row_starts[1]-first_row_index ;
   first_col_diag = col_starts[0];
   local_num_cols = col_starts[1]-first_col_diag;

   hypre_ParCSRMatrixComm(matrix) = comm;
   /* local storage is split into a diag block (columns owned locally) and an
      offd block (columns owned by other ranks) */
   hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag);
   hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd);
   hypre_ParCSRMatrixDiagT(matrix) = NULL;
   hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional
   hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
   hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
   hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index;
   hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag;
   hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1;
   hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1;
   hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
   hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL;
   hypre_ParCSRMatrixProcOrdering(matrix) = NULL;
   hypre_ParCSRMatrixAssumedPartition(matrix) = NULL;
   hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1;

   /* We could make these null instead of leaving the range. If that change is
      made, then when this create is called from functions like the matrix-matrix
      multiply, be careful not to generate a new partition.
*/
   hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
   hypre_ParCSRMatrixColStarts(matrix) = col_starts;
   hypre_ParCSRMatrixCommPkg(matrix) = NULL;
   hypre_ParCSRMatrixCommPkgT(matrix) = NULL;

   /* set defaults */
   hypre_ParCSRMatrixOwnsData(matrix) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
   hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
   /* when the two partitionings alias, only the row side owns the array to
      avoid a double free in Destroy */
   if (row_starts == col_starts)
   {
      hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
   }
   hypre_ParCSRMatrixRowindices(matrix) = NULL;
   hypre_ParCSRMatrixRowvalues(matrix) = NULL;
   hypre_ParCSRMatrixGetrowactive(matrix) = 0;

   matrix->bdiaginv = NULL;
   matrix->bdiaginv_comm_pkg = NULL;
   matrix->bdiag_size = -1;

#if defined(HYPRE_USING_CUDA)
   hypre_ParCSRMatrixSocDiagJ(matrix) = NULL;
   hypre_ParCSRMatrixSocOffdJ(matrix) = NULL;
#endif

   return matrix;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixDestroy
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix )
{
   if (matrix)
   {
      HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix);

      /* owned CSR parts (and optional transposes / comm packages) */
      if ( hypre_ParCSRMatrixOwnsData(matrix) )
      {
         hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix));
         hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix));

         if ( hypre_ParCSRMatrixDiagT(matrix) )
         {
            hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix));
         }

         if ( hypre_ParCSRMatrixOffdT(matrix) )
         {
            hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix));
         }

         if (hypre_ParCSRMatrixColMapOffd(matrix))
         {
            hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST);
         }

         if (hypre_ParCSRMatrixDeviceColMapOffd(matrix))
         {
            hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE);
         }

         if (hypre_ParCSRMatrixCommPkg(matrix))
         {
            hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix));
         }

         if (hypre_ParCSRMatrixCommPkgT(matrix))
         {
            hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix));
         }
      }

      if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) )
      {
hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST);
      }

      if ( hypre_ParCSRMatrixOwnsColStarts(matrix) )
      {
         hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST);
      }

      /* RL: this is actually not correct since the memory_location may have
       * been changed after allocation
       * put them in containers TODO */
      hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location);
      hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location);

      if ( hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix) )
      {
         hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix));
      }

      if ( hypre_ParCSRMatrixProcOrdering(matrix) )
      {
         hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST);
      }

      hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST);
      if (matrix->bdiaginv_comm_pkg)
      {
         hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg);
      }

#if defined(HYPRE_USING_CUDA)
      hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE);
      hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE);
#endif

      hypre_TFree(matrix, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixInitialize
 *--------------------------------------------------------------------------*/

/* Allocate the diag/offd CSR storage in `memory_location` and the host-side
 * off-diagonal column map. */
HYPRE_Int
hypre_ParCSRMatrixInitialize_v2( hypre_ParCSRMatrix *matrix, HYPRE_MemoryLocation memory_location )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location);
   hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location);

   /* col_map_offd always lives on the host */
   hypre_ParCSRMatrixColMapOffd(matrix) =
      hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Convenience wrapper: initialize in the matrix's current memory location. */
HYPRE_Int
hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix )
{
   return hypre_ParCSRMatrixInitialize_v2(matrix,
hypre_ParCSRMatrixMemoryLocation(matrix)); }

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixClone
 * Creates and returns a new copy S of the argument A
 * The following variables are not copied because they will be constructed
 * later if needed: CommPkg, CommPkgT, rowindices, rowvalues
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix*
hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix *A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location)
{
   hypre_ParCSRMatrix *S;

   S = hypre_ParCSRMatrixCreate( hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixGlobalNumCols(A),
                                 hypre_ParCSRMatrixRowStarts(A),
                                 hypre_ParCSRMatrixColStarts(A),
                                 hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                 hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)),
                                 hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)) );

   /* !!! S does not own Row/Col-Starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S, 0);
   hypre_ParCSRMatrixSetColStartsOwner(S, 0);

   hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A);
   hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A);

   hypre_ParCSRMatrixInitialize_v2(S, memory_location);

   hypre_ParCSRMatrixCopy(A, S, copy_data);

   return S;
}

/* Clone in A's own memory location. */
hypre_ParCSRMatrix*
hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, HYPRE_Int copy_data)
{
   return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A));
}

/* Move A's storage to `memory_location`; a no-op tag update when the actual
 * (host vs device) location is unchanged. */
HYPRE_Int
hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix *A, HYPRE_MemoryLocation memory_location)
{
   if (!A)
   {
      return hypre_error_flag;
   }

   HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   if ( hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location) )
   {
      /* deep-copy diag/offd into the new location and drop the old copies */
      hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location);
      hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A));
      hypre_ParCSRMatrixDiag(A) = A_diag;

      hypre_CSRMatrix
*A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location);
      hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A));
      hypre_ParCSRMatrixOffd(A) = A_offd;

      /* cached getrow buffers belong to the old location; drop them */
      hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location);
      hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location);
   }
   else
   {
      /* same physical space: just retag the CSR parts */
      hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location;
      hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location;
   }

   return hypre_error_flag;
}

/* Shared implementation for the global nonzero counters: format[0]=='I'
 * accumulates into the integer counter, 'D' into the floating-point one
 * (the latter avoids HYPRE_BigInt overflow for very large matrices). */
HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros_core( hypre_ParCSRMatrix *matrix, const char* format )
{
   MPI_Comm comm;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_ParCSRMatrixComm(matrix);
   diag = hypre_ParCSRMatrixDiag(matrix);
   offd = hypre_ParCSRMatrixOffd(matrix);

   /* TODO in HYPRE_DEBUG ? */
   hypre_CSRMatrixCheckSetNumNonzeros(diag);
   hypre_CSRMatrixCheckSetNumNonzeros(offd);

   if (format[0] == 'I')
   {
      HYPRE_BigInt total_num_nonzeros;
      HYPRE_BigInt local_num_nonzeros;
      local_num_nonzeros = (HYPRE_BigInt) ( hypre_CSRMatrixNumNonzeros(diag) +
                                            hypre_CSRMatrixNumNonzeros(offd) );

      hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT,
                          hypre_MPI_SUM, comm);

      hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros;
   }
   else if (format[0] == 'D')
   {
      HYPRE_Real total_num_nonzeros;
      HYPRE_Real local_num_nonzeros;
      local_num_nonzeros = (HYPRE_Real) ( hypre_CSRMatrixNumNonzeros(diag) +
                                          hypre_CSRMatrixNumNonzeros(offd) );

      hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL,
                          hypre_MPI_SUM, comm);

      hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros;
   }
   else
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetNumNonzeros
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros(
hypre_ParCSRMatrix *matrix )
{
   return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int");
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDNumNonzeros
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix )
{
   return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double");
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDataOwner
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix,
                                HYPRE_Int owns_data )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_ParCSRMatrixOwnsData(matrix) = owns_data;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetRowStartsOwner
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix,
                                     HYPRE_Int owns_row_starts )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetColStartsOwner
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix,
                                     HYPRE_Int owns_col_starts )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixRead
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *
hypre_ParCSRMatrixRead( MPI_Comm comm, const char
*file_name )
{
   hypre_ParCSRMatrix *matrix;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_Int my_id, i, num_procs;
   /* per-rank file names: <name>.D.<rank>, <name>.O.<rank>, <name>.INFO.<rank> */
   char new_file_d[80], new_file_o[80], new_file_info[80];
   HYPRE_BigInt global_num_rows, global_num_cols;
   HYPRE_Int num_cols_offd;
   HYPRE_Int local_num_rows;
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
   HYPRE_BigInt *col_map_offd;
   FILE *fp;
   HYPRE_Int equal = 1;
   HYPRE_BigInt row_s, row_e, col_s, col_e;

   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
   hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
   hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);

   /* NOTE(review): fopen result and hypre_fscanf returns are not checked —
      a missing INFO file would crash here */
   fp = fopen(new_file_info, "r");
   hypre_fscanf(fp, "%b", &global_num_rows);
   hypre_fscanf(fp, "%b", &global_num_cols);
   hypre_fscanf(fp, "%d", &num_cols_offd);
   /* the bgl input file should only contain the EXACT range for local processor */
   hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e);
   row_starts[0] = row_s;
   row_starts[1] = row_e;
   col_starts[0] = col_s;
   col_starts[1] = col_e;

   col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);

   for (i=0; i < num_cols_offd; i++)
   {
      hypre_fscanf(fp, "%b", &col_map_offd[i]);
   }

   fclose(fp);

   /* collapse to a single shared partitioning when rows == cols */
   for (i=1; i >= 0; i--)
   {
      if (row_starts[i] != col_starts[i])
      {
         equal = 0;
         break;
      }
   }

   if (equal)
   {
      hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
      col_starts = row_starts;
   }

   diag = hypre_CSRMatrixRead(new_file_d);
   local_num_rows = hypre_CSRMatrixNumRows(diag);

   if (num_cols_offd)
   {
      offd = hypre_CSRMatrixRead(new_file_o);
   }
   else
   {
      /* no off-processor columns: build an empty offd block */
      offd = hypre_CSRMatrixCreate(local_num_rows,0,0);
      hypre_CSRMatrixInitialize(offd);
   }

   matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_ParCSRMatrixComm(matrix) = comm;
   hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
   hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s;
   hypre_ParCSRMatrixFirstColDiag(matrix) = col_s;
   /* row_e/col_e are exclusive ends in the file, hence the -1 */
   hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1;
   hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1;

   hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
   hypre_ParCSRMatrixColStarts(matrix) = col_starts;

   hypre_ParCSRMatrixCommPkg(matrix) = NULL;

   /* set defaults */
   hypre_ParCSRMatrixOwnsData(matrix) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
   hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
   if (row_starts == col_starts)
   {
      hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
   }

   hypre_ParCSRMatrixDiag(matrix) = diag;
   hypre_ParCSRMatrixOffd(matrix) = offd;
   if (num_cols_offd)
   {
      hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd;
   }
   else
   {
      hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
   }

   return matrix;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixPrint
 *--------------------------------------------------------------------------*/

/* Writes matrix in the same per-rank three-file layout that
 * hypre_ParCSRMatrixRead consumes (<name>.D/.O/.INFO.<rank>). */
HYPRE_Int
hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix,
                         const char *file_name )
{
   MPI_Comm comm;
   HYPRE_BigInt global_num_rows;
   HYPRE_BigInt global_num_cols;
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int my_id, i, num_procs;
   char new_file_d[80], new_file_o[80], new_file_info[80];
   FILE *fp;
   HYPRE_Int num_cols_offd = 0;
   HYPRE_BigInt row_s, row_e, col_s, col_e;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_ParCSRMatrixComm(matrix);
   global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix);
   global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix);
   col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
   if (hypre_ParCSRMatrixOffd(matrix))
      num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix));

   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
   hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
   hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%b\n", global_num_rows); hypre_fprintf(fp, "%b\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1); for (i=0; i < num_cols_offd; i++) hypre_fprintf(fp, "%b\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename ) { MPI_Comm comm; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_Int num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j; HYPRE_BigInt I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_BigInt ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); 
row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0]+(HYPRE_BigInt)base_i; iupper = row_starts[1]+(HYPRE_BigInt)base_i - 1; jlower = col_starts[0]+(HYPRE_BigInt)base_j; jupper = col_starts[1]+(HYPRE_BigInt)base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt)(i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i+1]; j++) { J = first_col_diag + (HYPRE_BigInt)(diag_j[j] + base_j); if ( diag_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } /* print offd columns */ if ( num_nonzeros_offd ) { for (j = offd_i[i]; j < offd_i[i+1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt)base_j; if ( offd_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J ); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_i_ptr, HYPRE_Int *base_j_ptr, hypre_ParCSRMatrix **matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int)row_starts[0]; base_j = (HYPRE_Int)col_starts[0]; equal = 1; for (i = 0; i <= num_procs; i++) { row_starts[i] -= big_base_i; col_starts[i] -= big_base_j; if 
(row_starts[i] != col_starts[i]) equal = 0; } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); last_col_diag = first_col_diag+(HYPRE_BigInt)num_cols-1; diag_cnt = 0; offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag+num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%b %b %le", &I, &J, &data); i2 = (HYPRE_Int)(I-big_base_i-first_row_index); J -= big_base_j; if (i2 > row_cnt) { diag_i[i2] = diag_cnt; offd_i[i2] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { tmp_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = (HYPRE_Int)(J - first_col_diag); diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_nonzeros_offd; i++) aux_offd_j[i] = (HYPRE_BigInt)offd_j[i]; hypre_BigQsort0(aux_offd_j,0,num_nonzeros_offd-1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i=1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) col_map_offd[++offd_cnt] = aux_offd_j[i]; } for (i=0; i < num_nonzeros_offd; i++) { offd_j[i] 
= hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd); } hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); } /* move diagonal element in first position in each row */ for (i=0; i < num_rows; i++) { i_col = diag_i[i]; for (j=i_col; j < diag_i[i+1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix, HYPRE_BigInt *row_start, HYPRE_BigInt *row_end, HYPRE_BigInt *col_start, HYPRE_BigInt *col_end ) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id ); *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. 
* Only a single row can be accessed via this function at any one time; the
* corresponding RestoreRow function must be called, to avoid bleeding memory,
* and to be able to look at another row.
* Either one of col_ind and values can be left null, and those values will
* not be returned.
* All indices are returned in 0-based indexing, no matter what is used under
* the hood. EXCEPTION: currently this only works if the local CSR matrices
* use 0-based indexing.
* This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ
* matrix code, adjusted for our data and software structures.
* AJC 4/99.
*--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixGetRowHost( hypre_ParCSRMatrix *mat,
                              HYPRE_BigInt        row,
                              HYPRE_Int          *size,
                              HYPRE_BigInt      **col_ind,
                              HYPRE_Complex     **values )
{
   HYPRE_Int my_id;
   HYPRE_BigInt row_start, row_end;
   hypre_CSRMatrix *Aa;   /* diag block (columns local to this rank) */
   hypre_CSRMatrix *Ba;   /* offd block (columns owned by other ranks) */

   if (!mat)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
   Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);

   /* only one row may be active at a time */
   if (hypre_ParCSRMatrixGetrowactive(mat))
   {
      return(-1);
   }

   hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id );

   hypre_ParCSRMatrixGetrowactive(mat) = 1;
   row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
   row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1;
   /* NOTE(review): on this out-of-range return the Getrowactive flag stays
      set - confirm callers are expected to call RestoreRow even on error */
   if (row < row_start || row >= row_end)
   {
      return(-1);
   }

   /* if buffer is not allocated and some information is requested,
      allocate buffer sized for the longest local row (diag + offd) */
   if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values ))
   {
      HYPRE_Int max = 1,tmp;
      HYPRE_Int i;
      HYPRE_Int m = row_end - row_start;

      for ( i = 0; i < m; i++ )
      {
         tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] +
               hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i];
         if (max < tmp)
         {
            max = tmp;
         }
      }

      hypre_ParCSRMatrixRowvalues(mat) =
         (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat));
      hypre_ParCSRMatrixRowindices(mat) =
         (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat));
   }

   /* Copy from dual sequential matrices into buffer */
   {
      HYPRE_Complex *vworkA, *vworkB, *v_p;
      HYPRE_Int i, *cworkA, *cworkB;
      HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat);
      HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int)(row-row_start);
      HYPRE_BigInt *cmap, *idx_p;

      /* entry count and column/value slices of the requested local row */
      nzA = hypre_CSRMatrixI(Aa)[lrow+1] - hypre_CSRMatrixI(Aa)[lrow];
      cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
      vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );

      nzB = hypre_CSRMatrixI(Ba)[lrow+1] - hypre_CSRMatrixI(Ba)[lrow];
      cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
      vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );

      nztot = nzA + nzB;

      cmap = hypre_ParCSRMatrixColMapOffd(mat);

      if (values || col_ind)
      {
         if (nztot)
         {
            /* Sort by increasing column numbers, assuming A and B already sorted */
            HYPRE_Int imark = -1;

            if (values)
            {
               *values = v_p = hypre_ParCSRMatrixRowvalues(mat);
               /* imark = number of offd entries whose global column index
                  precedes the diag range; they go first */
               for ( i = 0; i < nzB; i++ )
               {
                  if (cmap[cworkB[i]] < cstart)
                  {
                     v_p[i] = vworkB[i];
                  }
                  else
                  {
                     break;
                  }
               }
               imark = i;
               for ( i = 0; i < nzA; i++ )
               {
                  v_p[imark+i] = vworkA[i];
               }
               for ( i = imark; i < nzB; i++ )
               {
                  v_p[nzA+i] = vworkB[i];
               }
            }

            if (col_ind)
            {
               *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat);
               if (imark > -1)
               {
                  /* imark already computed by the values pass */
                  for ( i = 0; i < imark; i++ )
                  {
                     idx_p[i] = cmap[cworkB[i]];
                  }
               }
               else
               {
                  for ( i = 0; i < nzB; i++ )
                  {
                     if (cmap[cworkB[i]] < cstart)
                     {
                        idx_p[i] = cmap[cworkB[i]];
                     }
                     else
                     {
                        break;
                     }
                  }
                  imark = i;
               }
               for ( i = 0; i < nzA; i++ )
               {
                  idx_p[imark+i] = cstart + cworkA[i];
               }
               for ( i = imark; i < nzB; i++ )
               {
                  idx_p[nzA+i] = cmap[cworkB[i]];
               }
            }
         }
         else
         {
            /* empty row */
            if (col_ind)
            {
               *col_ind = 0;
            }
            if (values)
            {
               *values = 0;
            }
         }
      }
      *size = nztot;
   } /* End of copy */

   return hypre_error_flag;
}

/* Dispatches to the device implementation when the matrix lives in device
   memory (CUDA builds), otherwise to the host implementation above. */
HYPRE_Int
hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat,
                          HYPRE_BigInt        row,
                          HYPRE_Int          *size,
                          HYPRE_BigInt      **col_ind,
                          HYPRE_Complex     **values )
{
#if defined(HYPRE_USING_CUDA)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(mat) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values);
   }
   else
#endif
   {
      return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixRestoreRow
 *
 * Ends the GetRow access; it is an error to call this without an
 * outstanding GetRow. The row/size/col_ind/values arguments are unused.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix,
                              HYPRE_BigInt        row,
                              HYPRE_Int          *size,
                              HYPRE_BigInt      **col_ind,
                              HYPRE_Complex     **values )
{
   if (!hypre_ParCSRMatrixGetrowactive(matrix))
   {
      hypre_error(HYPRE_ERROR_GENERIC);
      return hypre_error_flag;
   }

   hypre_ParCSRMatrixGetrowactive(matrix) = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixToParCSRMatrix:
 *
 * Generates a ParCSRMatrix distributed across the processors in comm
 * from a CSRMatrix on proc 0 .
*
*--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *
hypre_CSRMatrixToParCSRMatrix( MPI_Comm         comm,
                               hypre_CSRMatrix *A,
                               HYPRE_BigInt    *global_row_starts,
                               HYPRE_BigInt    *global_col_starts )
{
   hypre_ParCSRMatrix *parcsr_A;
   HYPRE_BigInt       *global_data;
   HYPRE_BigInt        global_size;
   HYPRE_BigInt        global_num_rows;
   HYPRE_BigInt        global_num_cols;
   HYPRE_Int           num_procs, my_id;
   HYPRE_Int          *num_rows_proc;
   HYPRE_Int          *num_nonzeros_proc;
   HYPRE_BigInt       *row_starts = NULL;
   HYPRE_BigInt       *col_starts = NULL;
   hypre_CSRMatrix    *local_A;
   HYPRE_Complex      *A_data;
   HYPRE_Int          *A_i;
   HYPRE_Int          *A_j;
   hypre_MPI_Request  *requests;
   hypre_MPI_Status   *status, status0;
   hypre_MPI_Datatype *csr_matrix_datatypes;
   HYPRE_Int           free_global_row_starts = 0;
   HYPRE_Int           free_global_col_starts = 0;
   HYPRE_Int           total_size;
   HYPRE_BigInt        first_col_diag;
   HYPRE_BigInt        last_col_diag;
   HYPRE_Int           num_rows;
   HYPRE_Int           num_nonzeros;
   HYPRE_Int           i, ind;

   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   /* global_data layout: [0]=num_rows, [1]=num_cols, [2]=payload size,
      [3]=partitioning case flag, [4..]=partitioning array(s) (rank 0 only).
      All ranks allocate at least 4 entries. */
   total_size = 4;
   if (my_id == 0)
   {
      total_size += 2*(num_procs + 1);
   }

   global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST);
   if (my_id == 0)
   {
      global_size = 3;
      if (global_row_starts)
      {
         if (global_col_starts)
         {
            if (global_col_starts != global_row_starts)
            {
               /* contains code for what to expect,
                  if 0: global_row_starts = global_col_starts, only global_row_starts given
                  if 1: only global_row_starts given, global_col_starts = NULL
                  if 2: both global_row_starts and global_col_starts given
                  if 3: only global_col_starts given, global_row_starts = NULL */
               global_data[3] = 2;
               global_size += (HYPRE_BigInt) (2*(num_procs + 1) + 1);
               for (i = 0; i < (num_procs + 1); i++)
               {
                  global_data[i+4] = global_row_starts[i];
               }
               for (i = 0; i < (num_procs + 1); i++)
               {
                  global_data[i+num_procs+5] = global_col_starts[i];
               }
            }
            else
            {
               global_data[3] = 0;
               global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
               for (i = 0; i < (num_procs + 1); i++)
               {
                  global_data[i+4] = global_row_starts[i];
               }
            }
         }
         else
         {
            global_data[3] = 1;
            global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
            for (i = 0; i < (num_procs + 1); i++)
            {
               global_data[i+4] = global_row_starts[i];
            }
         }
      }
      else
      {
         if (global_col_starts)
         {
            global_data[3] = 3;
            global_size += (HYPRE_BigInt) ((num_procs + 1) + 1);
            for (i = 0; i < (num_procs + 1); i++)
            {
               global_data[i+4] = global_col_starts[i];
            }
         }
      }

      global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A);
      global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A);
      global_data[2] = global_size;

      A_data = hypre_CSRMatrixData(A);
      A_i = hypre_CSRMatrixI(A);
      A_j = hypre_CSRMatrixJ(A);
   }
   /* BUG FIX: broadcast 4 entries, not 3. The branches below select the
      matching hypre_MPI_Scatter sequence based on global_data[3]; without
      broadcasting it, non-root ranks read their zero-initialized value and
      take the wrong branch for cases 2 and 3 (mismatched collectives). */
   hypre_MPI_Bcast(global_data, 4, HYPRE_MPI_BIG_INT, 0, comm);
   global_num_rows = global_data[0];
   global_num_cols = global_data[1];
   global_size = global_data[2];

   if (global_size > 3)
   {
      HYPRE_Int send_start;

      if (global_data[3] == 2)
      {
         /* both row and column partitionings: scatter each rank's
            [start, end) pair from the packed arrays on rank 0 */
         row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
         col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

         send_start = 4;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);

         send_start = 5;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);

         send_start = 4 + (num_procs + 1);
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);

         send_start = 5 + (num_procs + 1);
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
      }
      else if ((global_data[3] == 0) || (global_data[3] == 1))
      {
         /* only a row partitioning was supplied */
         row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

         send_start = 4;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);

         send_start = 5;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);

         if (global_data[3] == 0)
         {
            col_starts = row_starts;
         }
      }
      else
      {
         /* only a column partitioning was supplied */
         col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

         send_start = 4;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);

         send_start = 5;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
      }
   }
   hypre_TFree(global_data, HYPRE_MEMORY_HOST);

   // Create ParCSR matrix
   parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                       row_starts, col_starts, 0, 0, 0);

   // Allocate memory for building ParCSR matrix
   num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);

   if (my_id == 0)
   {
      if (!global_row_starts)
      {
         hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts);
         free_global_row_starts = 1;
      }
      if (!global_col_starts)
      {
         /* NOTE(review): partitions by global_num_rows although this is the
            column partitioning; the array is only freed below, never read,
            so this is harmless here - confirm intent */
         hypre_GeneratePartitioning(global_num_rows, num_procs, &global_col_starts);
         free_global_col_starts = 1;
      }

      for (i = 0; i < num_procs; i++)
      {
         num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i+1] - global_row_starts[i]);
         num_nonzeros_proc[i] = A_i[(HYPRE_Int)global_row_starts[i+1]] -
                                A_i[(HYPRE_Int)global_row_starts[i]];
      }
      //num_nonzeros_proc[num_procs-1] = A_i[(HYPRE_Int)global_num_rows] - A_i[(HYPRE_Int)row_starts[num_procs-1]];
   }
   hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm);
   hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm);

   /* RL: this is not correct: (HYPRE_Int) global_num_cols */
   local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros);

   csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST);
   if (my_id == 0)
   {
      /* rank 0 sends each remote rank its row block as a derived datatype
         and keeps its own block by aliasing A's arrays (OwnsData = 0) */
      requests = hypre_CTAlloc(hypre_MPI_Request, num_procs-1, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1, HYPRE_MEMORY_HOST);
      for (i = 1; i < num_procs; i++)
      {
         ind = A_i[(HYPRE_Int) global_row_starts[i]];

         hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i],
/* (completes "HYPRE_Int *a_i =" started on the previous line) */
hypre_CSRMatrixI(A);
/*RL: XXX FIXME if A spans global column space, the following a_j should be bigJ */
HYPRE_Int *a_j = hypre_CSRMatrixJ(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix);
HYPRE_BigInt *col_map_offd;
HYPRE_Complex *diag_data, *offd_data;
HYPRE_Int *diag_i, *offd_i;
HYPRE_Int *diag_j, *offd_j;
/* marker[c]: first pass marks offd columns (0/1); second use stores each
   marked column's local offd index */
HYPRE_Int *marker;
HYPRE_Int num_cols_diag, num_cols_offd;
/* first_elmt allows a_i that does not start at 0 */
HYPRE_Int first_elmt = a_i[0];
HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt;
HYPRE_Int counter;

num_cols_diag = (HYPRE_Int)(last_col_diag - first_col_diag +1);
num_cols_offd = 0;

HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A);

if (num_cols - num_cols_diag)
{
   /* A has columns outside [first_col_diag, last_col_diag]: split into
      diag and offd */
   hypre_CSRMatrixInitialize_v2(diag, 0, memory_location);
   diag_i = hypre_CSRMatrixI(diag);

   hypre_CSRMatrixInitialize_v2(offd, 0, memory_location);
   offd_i = hypre_CSRMatrixI(offd);
   marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST);

   for (i=0; i < num_cols; i++)
   {
      marker[i] = 0;
   }

   /* pass 1: count entries per row for diag and offd, mark offd columns */
   jo = 0;
   jd = 0;
   for (i = 0; i < num_rows; i++)
   {
      offd_i[i] = jo;
      diag_i[i] = jd;

      for (j = a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
      {
         if (a_j[j] < first_col_diag || a_j[j] > last_col_diag)
         {
            if (!marker[a_j[j]])
            {
               marker[a_j[j]] = 1;
               num_cols_offd++;
            }
            jo++;
         }
         else
         {
            jd++;
         }
      }
   }
   offd_i[num_rows] = jo;
   diag_i[num_rows] = jd;

   hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
   col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);

   /* build col_map_offd in ascending order and reuse marker[] to map a
      global offd column to its local index */
   counter = 0;
   for (i = 0; i < num_cols; i++)
   {
      if (marker[i])
      {
         col_map_offd[counter] = (HYPRE_BigInt) i;
         marker[i] = counter;
         counter++;
      }
   }

   hypre_CSRMatrixNumNonzeros(diag) = jd;
   hypre_CSRMatrixInitialize(diag);
   diag_data = hypre_CSRMatrixData(diag);
   diag_j = hypre_CSRMatrixJ(diag);

   hypre_CSRMatrixNumNonzeros(offd) = jo;
   hypre_CSRMatrixNumCols(offd) = num_cols_offd;
   hypre_CSRMatrixInitialize(offd);
   offd_data = hypre_CSRMatrixData(offd);
   offd_j = hypre_CSRMatrixJ(offd);

   /* pass 2: copy data and (re)map column indices */
   jo = 0;
   jd = 0;
   for (i=0; i < num_rows; i++)
   {
      for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
      {
         if (a_j[j] < (HYPRE_Int)first_col_diag || a_j[j] > (HYPRE_Int)last_col_diag)
         {
            offd_data[jo] = a_data[j];
            offd_j[jo++] = marker[a_j[j]];
         }
         else
         {
            diag_data[jd] = a_data[j];
            diag_j[jd++] = (HYPRE_Int)(a_j[j]-first_col_diag);
         }
      }
   }
   hypre_TFree(marker, HYPRE_MEMORY_HOST);
}
else
{
   /* every column of A is in the diag range: copy A into diag and give
      offd an all-zero row pointer array */
   hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros;
   hypre_CSRMatrixInitialize(diag);
   diag_data = hypre_CSRMatrixData(diag);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);

   for (i=0; i < num_nonzeros; i++)
   {
      diag_data[i] = a_data[i];
      diag_j[i] = a_j[i];
   }
   offd_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);

   for (i=0; i < num_rows+1; i++)
   {
      diag_i[i] = a_i[i];
      offd_i[i] = 0;
   }

   hypre_CSRMatrixNumCols(offd) = 0;
   hypre_CSRMatrixI(offd) = offd_i;
}

return hypre_error_flag;
}

/* Merges the diag and offd blocks of a ParCSRMatrix into a single local
   CSR matrix with *global* (big) column indices stored in big_j. */
hypre_CSRMatrix *
hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   hypre_CSRMatrix *matrix;

   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix);
   HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag);

   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd);

   HYPRE_Int *matrix_i;
   HYPRE_BigInt *matrix_j;
   HYPRE_Complex *matrix_data;
   HYPRE_Int num_nonzeros, i, j;
   HYPRE_Int count;
   HYPRE_Int size, rest, num_threads, ii;
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix);

   num_nonzeros = diag_i[num_rows] + offd_i[num_rows];
   matrix = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros);
hypre_CSRMatrixMemoryLocation(matrix) = memory_location; hypre_CSRMatrixBigInitialize(matrix); matrix_i = hypre_CSRMatrixI(matrix); matrix_j = hypre_CSRMatrixBigJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); num_threads = hypre_NumThreads(); size = num_rows/num_threads; rest = num_rows - size*num_threads; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { HYPRE_Int ns, ne; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } count = diag_i[ns]+offd_i[ns];; for (i = ns; i < ne; i++) { matrix_i[i] = count; for (j=diag_i[i]; j < diag_i[i+1]; j++) { matrix_data[count] = diag_data[j]; matrix_j[count++] = (HYPRE_BigInt)diag_j[j]+first_col_diag; } for (j=offd_i[i]; j < offd_i[i+1]; j++) { matrix_data[count] = offd_data[j]; matrix_j[count++] = col_map_offd[offd_j[j]]; } } } /* end parallel region */ matrix_i[num_rows] = num_nonzeros; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixToCSRMatrixAll: * generates a CSRMatrix from a ParCSRMatrix on all processors that have * parts of the ParCSRMatrix * Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1 *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix); hypre_CSRMatrix *matrix; hypre_CSRMatrix *local_matrix; HYPRE_Int num_rows = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(par_matrix); HYPRE_Int num_cols = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int *local_matrix_i; HYPRE_Int *local_matrix_j; HYPRE_Complex *local_matrix_data; HYPRE_Int i, j; HYPRE_Int local_num_rows; HYPRE_Int local_num_nonzeros; HYPRE_Int num_nonzeros; HYPRE_Int num_data; 
HYPRE_Int num_requests; HYPRE_Int vec_len, offset; HYPRE_Int start_index; HYPRE_Int proc_id; HYPRE_Int num_procs, my_id; HYPRE_Int num_types; HYPRE_Int *used_procs; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf=NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_num_rows = (HYPRE_Int)(hypre_ParCSRMatrixLastRowIndex(par_matrix) - hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1); local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */ hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */ local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); /* determine procs that have vector data and store their ids in used_procs */ /* we need to do an exchange data for this. 
If I own row then I will contact processor 0 with the endpoint of my local range */ if (local_num_rows > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = (HYPRE_Int)hypre_ParCSRMatrixLastRowIndex(par_matrix); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /*build the response object*/ /*send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToCSRMatrix; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void**) &response_recv_buf, &response_recv_buf_starts); /* now processor 0 should have a list of ranges for processors that have rows - these are in send_proc_obj - it needs to create the new list of processors and also an array of vec starts - and send to those who own row*/ if (my_id) { if (local_num_rows) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, 
num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); for (i=1; i<= num_types; i++) { used_procs[i-1] = send_info[i]; } for (i=num_types+1; i< count; i++) { new_vec_starts[i-num_types-1] = send_info[i] ; } } else /* clean up and exit */ { hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); return NULL; } } else /* my_id ==0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i=0; i< num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i+1] = send_proc_obj.elements[i]+1; } hypre_qsort0(used_procs, 0, num_types-1); hypre_qsort0(new_vec_starts, 0, num_types); /*now we need to put into an array to send */ count = 2*num_types+2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i=1; i<= num_types; i++) { send_info[i] = (HYPRE_BigInt)used_procs[i-1]; } for (i=num_types+1; i< count; i++) { send_info[i] = new_vec_starts[i-num_types-1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first*/ start = 0; if (num_types && used_procs[0] == 0) { start = 1; } for (i=start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]); } hypre_MPI_Waitall(num_types-start, requests, status); 
hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_num_rows) { if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); return NULL; } /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */ /* this matrix should be rather small */ matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST); num_requests = 4*num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* exchange contents of local_matrix_i - here we are sending to ourself also*/ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]); hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ /* global numbering?*/ offset = matrix_i[new_vec_starts[1]]; for (i=1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++) matrix_i[j+1] += offset; offset = matrix_i[new_vec_starts[i+1]]; } num_nonzeros = matrix_i[num_rows]; matrix = 
hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* generate datatypes for further data exchange and exchange remaining data, i.e. column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[(HYPRE_Int)new_vec_starts[i]]; num_data = matrix_i[(HYPRE_Int)new_vec_starts[i+1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i=0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCopy, * copies B to A, * if copy_data = 0, only the structure of A is copied to B * the routine does not check whether the dimensions of A and B are compatible *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Int copy_data ) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; 
HYPRE_BigInt *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int num_cols_offd_A; HYPRE_Int num_cols_offd_B; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(1); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_assert(num_cols_offd_A == num_cols_offd_B); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); /* should not happen if B has been initialized */ if (num_cols_offd_B && col_map_offd_B == NULL) { col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int *response_message_size ) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2; hypre_MPI_Comm_rank(comm, &myid ); /*check to see if we need to allocate more space in send_proc_obj 
for ids*/ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length +=10; /*add space for 10 more processors*/ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /*initialize*/ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/ /*send proc*/ send_proc_obj->id[count] = contact_proc; /*do we need more storage for the elements?*/ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /*populate send_proc_obj*/ for (i=0; i< contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count+1] = index; send_proc_obj->length++; /*output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. 
 * A and B must have the same communicator, numbers and distributions of rows
 * and columns (they can differ in which row-column pairs are nonzero, thus
 * in which columns are in a offd block)
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A,
                                              hypre_ParCSRMatrix * B )
{
   hypre_ParCSRMatrix *C;
   HYPRE_BigInt       *col_map_offd_C = NULL;
   HYPRE_Int           num_procs, my_id, p;
   MPI_Comm            comm = hypre_ParCSRMatrixComm( A );

   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 , HYPRE_MEMORY_HOST);

   /* C shares all global sizing/partitioning metadata with A */
   hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A );
   hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A );
   hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A );
   hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A );
   hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B )
                 == hypre_ParCSRMatrixFirstRowIndex( A ) );

   /* C borrows A's row/col partition arrays; Owns*Starts = 0 so C's
    * destructor will not free them */
   hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A );
   hypre_ParCSRMatrixOwnsRowStarts( C ) = 0;
   hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A );
   hypre_ParCSRMatrixOwnsColStarts( C ) = 0;

   /* NOTE(review): this compares the col_starts POINTERS, not their
    * contents, and the loop body never uses p (and runs num_procs+1
    * identical iterations).  It looks like it intended to assert
    * entry-wise equality of A's and B's col_starts — confirm against
    * upstream hypre before changing. */
   for ( p=0; p<=num_procs; ++p )
      hypre_assert( hypre_ParCSRMatrixColStarts(A)
                    == hypre_ParCSRMatrixColStarts(B) );

   hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A );
   hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A );
   hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A );

   /* structural union of the local blocks; offd union also merges the
    * two col maps into col_map_offd_C */
   hypre_ParCSRMatrixDiag( C ) =
      hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B),
                            0, 0, 0 );
   hypre_ParCSRMatrixOffd( C ) =
      hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B),
                            hypre_ParCSRMatrixColMapOffd(A),
                            hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C );
   hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C;

   /* no communication package yet; built lazily when needed */
   hypre_ParCSRMatrixCommPkg( C ) = NULL;
   hypre_ParCSRMatrixCommPkgT( C ) = NULL;
   hypre_ParCSRMatrixOwnsData( C ) = 1;
   /* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce.
      I suspect, but don't know, that other parts of hypre do not assume that
      the correct values have been set.
      hypre_ParCSRMatrixSetNumNonzeros( C );
      hypre_ParCSRMatrixSetDNumNonzeros( C );*/
   hypre_ParCSRMatrixNumNonzeros( C ) = 0;
   hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0;
   hypre_ParCSRMatrixRowindices( C ) = NULL;
   hypre_ParCSRMatrixRowvalues( C ) = NULL;
   hypre_ParCSRMatrixGetrowactive( C ) = 0;

   return C;
}

/* drop the entries that are not on the diagonal and smaller than
 * its row norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm
 * (entries are compacted in place; the offd col map is compressed and
 * the comm package rebuilt afterwards) */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A,
                                    HYPRE_Real          tol,
                                    HYPRE_Int           type)
{
   HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i;

   MPI_Comm         comm     = hypre_ParCSRMatrixComm(A);
   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int       *marker_offd     = NULL;  /* which offd cols survive */

   HYPRE_BigInt     first_row  = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int        nrow_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        my_id, num_procs;
   /* MPI size and rank*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* non-positive tolerance: nothing to drop */
   if (tol <= 0.0)
   {
      return hypre_error_flag;
   }

   marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   /* A_diag_i_i / A_offd_i_i hold the ORIGINAL row starts; A_diag_i /
    * A_offd_i are overwritten with compacted offsets as we go */
   nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0;
   for (i = 0; i < nrow_local; i++)
   {
      /* compute row norm */
      HYPRE_Real row_nrm = 0.0;
      for
      (j = A_diag_i_i; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Complex v = A_diag_a[j];
         if (type == 1)
         {
            row_nrm += fabs(v);
         }
         else if (type == 2)
         {
            row_nrm += v*v;
         }
         else
         {
            /* any other type (doc says -1): infinity norm */
            row_nrm = hypre_max(row_nrm, fabs(v));
         }
      }
      if (num_procs > 1)
      {
         /* accumulate the offd contribution to the same row norm */
         for (j = A_offd_i_i; j < A_offd_i[i+1]; j++)
         {
            HYPRE_Complex v = A_offd_a[j];
            if (type == 1)
            {
               row_nrm += fabs(v);
            }
            else if (type == 2)
            {
               row_nrm += v*v;
            }
            else
            {
               row_nrm = hypre_max(row_nrm, fabs(v));
            }
         }
      }

      if (type == 2)
      {
         row_nrm = sqrt(row_nrm);
      }

      /* drop small entries based on tol and row norm; survivors are
       * compacted in place into positions nnz_diag / nnz_offd */
      for (j = A_diag_i_i; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Int     col = A_diag_j[j];
         HYPRE_Complex val = A_diag_a[j];
         /* diagonal entry is always kept */
         if (i == col || fabs(val) >= tol * row_nrm)
         {
            A_diag_j[nnz_diag] = col;
            A_diag_a[nnz_diag] = val;
            nnz_diag ++;
         }
      }
      if (num_procs > 1)
      {
         for (j = A_offd_i_i; j < A_offd_i[i+1]; j++)
         {
            HYPRE_Int     col = A_offd_j[j];
            HYPRE_Complex val = A_offd_a[j];
            /* in normal cases: diagonal entry should not
             * appear in A_offd (but this can still be possible) */
            if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm)
            {
               if (0 == marker_offd[col])
               {
                  marker_offd[col] = 1;
               }
               A_offd_j[nnz_offd] = col;
               A_offd_a[nnz_offd] = val;
               nnz_offd ++;
            }
         }
      }
      /* save next row's original start before overwriting with the
       * compacted offset */
      A_diag_i_i = A_diag_i[i+1];
      A_offd_i_i = A_offd_i[i+1];
      A_diag_i[i+1] = nnz_diag;
      A_offd_i[i+1] = nnz_offd;
   }

   hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag;
   hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd;
   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);

   /* compress col_map_offd_A to the surviving columns; marker_offd[i]
    * is reused to hold each old column's new index */
   for (i = 0, k = 0; i < num_cols_A_offd; i++)
   {
      if (marker_offd[i])
      {
         col_map_offd_A[k] = col_map_offd_A[i];
         marker_offd[i] = k++;
      }
   }
   /* num_cols_A_offd = k; */
   hypre_CSRMatrixNumCols(A_offd) = k;

   /* remap offd column indices to the compressed numbering */
   for (i = 0; i < nnz_offd; i++)
   {
      A_offd_j[i] = marker_offd[A_offd_j[i]];
   }

   /* sparsity pattern changed: rebuild the communication package */
   if ( hypre_ParCSRMatrixCommPkg(A) )
   {
      hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) );
   }
   hypre_MatvecCommPkgCreate(A);

   hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Perform dual
truncation of ParCSR matrix.
 * This code is adapted from original BoomerAMGInterpTruncate()
 * A: parCSR matrix to be modified
 * tol: relative tolerance or truncation factor for dropping small terms
 * max_row_elmts: maximum number of (largest) nonzero elements to keep.
 * rescale: Boolean on whether or not to scale resulting matrix. Scaling for
 * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero values after dropping),
 * this way, the application of the truncated matrix on a constant vector is the same as that of
 * the original matrix.
 * nrm_type: type of norm used for dropping with tol.
 * -- 0 = infinity-norm
 * -- 1 = 1-norm
 * -- 2 = 2-norm */
HYPRE_Int
hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix *A,
                           HYPRE_Real          tol,
                           HYPRE_Int           max_row_elmts,
                           HYPRE_Int           rescale,
                           HYPRE_Int           nrm_type)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime();
#endif

   /* local (diag) block of A */
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_j_new;
   HYPRE_Real      *A_diag_data_new;

   /* off-processor (offd) block of A */
   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j    = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_j_new;
   HYPRE_Real      *A_offd_data_new;

   HYPRE_Int  n_fine   = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int  num_cols = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int  i, j, start_j;
   HYPRE_Int  ierr = 0;
   HYPRE_Int  next_open;
   HYPRE_Int  now_checking;
   HYPRE_Int  num_lost;
   HYPRE_Int  num_lost_global=0;
   HYPRE_Int  next_open_offd;
   HYPRE_Int  now_checking_offd;
   HYPRE_Int  num_lost_offd;
   HYPRE_Int  num_lost_global_offd;
   HYPRE_Int  A_diag_size;
   HYPRE_Int  A_offd_size;
   HYPRE_Int  num_elmts;
   HYPRE_Int  cnt, cnt_diag, cnt_offd;
   HYPRE_Real row_nrm;
   HYPRE_Real drop_coeff;
   HYPRE_Real row_sum;
   HYPRE_Real scale;

   HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag);
   HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd);

   /* Threading variables.  Entry i of num_lost_(offd_)per_thread holds the
    * number of dropped entries over thread i's row range. Cum_lost_per_thread
    * will temporarily store the cumulative number of dropped entries up to
    * each thread. */
   HYPRE_Int my_thread_num, num_threads, start, stop;
   /* heap-allocated so all threads share the value written by thread 0 */
   HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   HYPRE_Int * cum_lost_per_thread;
   HYPRE_Int * num_lost_per_thread;
   HYPRE_Int * num_lost_offd_per_thread;

   /* Initialize threading variables */
   max_num_threads[0] = hypre_NumThreads();
   cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_threads[0]; i++)
   {
      num_lost_per_thread[i] = 0;
      num_lost_offd_per_thread[i] = 0;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt)
#endif
   {
      my_thread_num = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();

      /* Compute each thread's range of rows to truncate and compress.  Note,
       * that i, j and data are all compressed as entries are dropped, but
       * that the compression only occurs locally over each thread's row
       * range.  A_diag_i is only made globally consistent at the end of this
       * routine.  During the dropping phases, A_diag_i[stop] will point to
       * the start of the next thread's row range. */

      /* my row range */
      start = (n_fine / num_threads) * my_thread_num;
      if (my_thread_num == num_threads-1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine / num_threads) * (my_thread_num + 1);
      }

      /*
       * Truncate based on truncation tolerance
       */
      if (tol > 0)
      {
         num_lost = 0;
         num_lost_offd = 0;

         /* read cursor (now_checking) and write cursor (next_open) for the
          * in-place compaction of this thread's row range */
         next_open = A_diag_i[start];
         now_checking = A_diag_i[start];
         next_open_offd = A_offd_i[start];;
         now_checking_offd = A_offd_i[start];;

         for (i = start; i < stop; i++)
         {
            row_nrm = 0;
            /* compute norm for dropping small terms */
            if (nrm_type == 0)
            {
               /* infty-norm */
               for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
               {
                  row_nrm = (row_nrm < fabs(A_diag_data[j])) ?
                            fabs(A_diag_data[j]) : row_nrm;
               }
               for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
               {
                  row_nrm = (row_nrm < fabs(A_offd_data[j])) ?
                            fabs(A_offd_data[j]) : row_nrm;
               }
            }
            if (nrm_type == 1)
            {
               /* 1-norm */
               for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
               {
                  row_nrm += fabs(A_diag_data[j]);
               }
               for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
               {
                  row_nrm += fabs(A_offd_data[j]);
               }
            }
            if (nrm_type == 2)
            {
               /* 2-norm */
               for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
               {
                  HYPRE_Complex v = A_diag_data[j];
                  row_nrm += v*v;
               }
               for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
               {
                  HYPRE_Complex v = A_offd_data[j];
                  row_nrm += v*v;
               }
               row_nrm = sqrt(row_nrm);
            }
            drop_coeff = tol * row_nrm;

            /* shift this row's start left by the entries dropped so far */
            start_j = A_diag_i[i];
            if (num_lost)
            {
               A_diag_i[i] -= num_lost;
            }
            row_sum = 0;
            scale = 0;
            for (j = start_j; j < A_diag_i[i+1]; j++)
            {
               row_sum += A_diag_data[now_checking];
               if (fabs(A_diag_data[now_checking]) < drop_coeff)
               {
                  num_lost++;
                  now_checking++;
               }
               else
               {
                  /* keep: compact entry down to next_open */
                  scale += A_diag_data[now_checking];
                  A_diag_data[next_open] = A_diag_data[now_checking];
                  A_diag_j[next_open] = A_diag_j[now_checking];
                  now_checking++;
                  next_open++;
               }
            }

            start_j = A_offd_i[i];
            if (num_lost_offd)
            {
               A_offd_i[i] -= num_lost_offd;
            }
            for (j = start_j; j < A_offd_i[i+1]; j++)
            {
               row_sum += A_offd_data[now_checking_offd];
               if (fabs(A_offd_data[now_checking_offd]) < drop_coeff)
               {
                  num_lost_offd++;
                  now_checking_offd++;
               }
               else
               {
                  scale += A_offd_data[now_checking_offd];
                  A_offd_data[next_open_offd] = A_offd_data[now_checking_offd];
                  A_offd_j[next_open_offd] = A_offd_j[now_checking_offd];
                  now_checking_offd++;
                  next_open_offd++;
               }
            }

            /* scale row of A so its sum matches the pre-drop row sum */
            if (rescale && scale != 0.)
            {
               if (scale != row_sum)
               {
                  scale = row_sum/scale;
                  for (j = A_diag_i[i]; j < (A_diag_i[i+1]-num_lost); j++)
                  {
                     A_diag_data[j] *= scale;
                  }
                  for (j = A_offd_i[i]; j < (A_offd_i[i+1]-num_lost_offd); j++)
                  {
                     A_offd_data[j] *= scale;
                  }
               }
            }
         } /* end loop for (i = 0; i < n_fine; i++) */

         /* store number of dropped elements and number of threads */
         if (my_thread_num == 0)
         {
            max_num_threads[0] = num_threads;
         }
         num_lost_per_thread[my_thread_num] = num_lost;
         num_lost_offd_per_thread[my_thread_num] = num_lost_offd;

      } /* end if (trunc_factor > 0) */

      /*
       * Truncate based on capping the nnz per row
       *
       */
      if (max_row_elmts > 0)
      {
         HYPRE_Int   A_mxnum, cnt1, last_index, last_index_offd;
         HYPRE_Int  *A_aux_j;
         HYPRE_Real *A_aux_data;

         /* find maximum row length locally over this row range */
         A_mxnum = 0;
         for (i=start; i<stop; i++)
         {
            /* Note A_diag_i[stop] is the starting point for the next thread
             * in j and data, not the stop point for this thread */
            last_index = A_diag_i[i+1];
            last_index_offd = A_offd_i[i+1];
            if (i == stop-1)
            {
               last_index -= num_lost_per_thread[my_thread_num];
               last_index_offd -= num_lost_offd_per_thread[my_thread_num];
            }
            cnt1 = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i];
            if (cnt1 > A_mxnum)
            {
               A_mxnum = cnt1;
            }
         }

         /* Some rows exceed max_row_elmts, and require truncation.  Essentially,
          * each thread truncates and compresses its range of rows locally. */
         if (A_mxnum > max_row_elmts)
         {
            num_lost = 0;
            num_lost_offd = 0;

            /* two temporary arrays to hold row i for temporary operations */
            A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST);
            A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST);
            cnt_diag = A_diag_i[start];
            cnt_offd = A_offd_i[start];

            for (i = start; i < stop; i++)
            {
               /* Note A_diag_i[stop] is the starting point for the next thread
                * in j and data, not the stop point for this thread */
               last_index = A_diag_i[i+1];
               last_index_offd = A_offd_i[i+1];
               if (i == stop-1)
               {
                  last_index -= num_lost_per_thread[my_thread_num];
                  last_index_offd -= num_lost_offd_per_thread[my_thread_num];
               }

               row_sum = 0;
               num_elmts = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i];
               if (max_row_elmts < num_elmts)
               {
                  /* copy both diagonal and off-diag parts of row i to _aux_
                   * arrays; offd columns are offset by num_cols so diag and
                   * offd entries can be sorted together and told apart */
                  cnt = 0;
                  for (j = A_diag_i[i]; j < last_index; j++)
                  {
                     A_aux_j[cnt] = A_diag_j[j];
                     A_aux_data[cnt++] = A_diag_data[j];
                     row_sum += A_diag_data[j];
                  }
                  num_lost += cnt;
                  cnt1 = cnt;
                  for (j = A_offd_i[i]; j < last_index_offd; j++)
                  {
                     A_aux_j[cnt] = A_offd_j[j]+num_cols;
                     A_aux_data[cnt++] = A_offd_data[j];
                     row_sum += A_offd_data[j];
                  }
                  num_lost_offd += cnt-cnt1;

                  /* sort data by decreasing magnitude */
                  hypre_qsort2_abs(A_aux_j,A_aux_data,0,cnt-1);
                  scale = 0;
                  if (i > start)
                  {
                     A_diag_i[i] = cnt_diag;
                     A_offd_i[i] = cnt_offd;
                  }
                  /* keep only the max_row_elmts largest entries */
                  for (j = 0; j < max_row_elmts; j++)
                  {
                     scale += A_aux_data[j];
                     if (A_aux_j[j] < num_cols)
                     {
                        A_diag_j[cnt_diag] = A_aux_j[j];
                        A_diag_data[cnt_diag++] = A_aux_data[j];
                     }
                     else
                     {
                        A_offd_j[cnt_offd] = A_aux_j[j]-num_cols;
                        A_offd_data[cnt_offd++] = A_aux_data[j];
                     }
                  }
                  /* correct counters: entries actually kept were not lost */
                  num_lost -= cnt_diag-A_diag_i[i];
                  num_lost_offd -= cnt_offd-A_offd_i[i];

                  /* scale row of A */
                  if (rescale && (scale != 0.))
                  {
                     if (scale != row_sum)
                     {
                        scale = row_sum/scale;
                        for (j = A_diag_i[i]; j < cnt_diag; j++)
                        {
                           A_diag_data[j] *= scale;
                        }
                        for (j = A_offd_i[i]; j < cnt_offd; j++)
                        {
                           A_offd_data[j] *= scale;
                        }
                     }
                  }
               } /* end if (max_row_elmts < num_elmts) */
               else
               {
                  /* nothing dropped from this row, but still have to shift entries back
                   * by the number dropped so far */
                  if (A_diag_i[i] != cnt_diag)
                  {
                     start_j = A_diag_i[i];
                     A_diag_i[i] = cnt_diag;
                     for (j = start_j; j < last_index; j++)
                     {
                        A_diag_j[cnt_diag] = A_diag_j[j];
                        A_diag_data[cnt_diag++] = A_diag_data[j];
                     }
                  }
                  else
                  {
                     cnt_diag += last_index-A_diag_i[i];
                  }

                  if (A_offd_i[i] != cnt_offd)
                  {
                     start_j = A_offd_i[i];
                     A_offd_i[i] = cnt_offd;
                     for (j = start_j; j < last_index_offd; j++)
                     {
                        A_offd_j[cnt_offd] = A_offd_j[j];
                        A_offd_data[cnt_offd++] = A_offd_data[j];
                     }
                  }
                  else
                  {
                     cnt_offd += last_index_offd-A_offd_i[i];
                  }
               }
            } /* end for (i = 0; i < n_fine; i++) */

            num_lost_per_thread[my_thread_num] += num_lost;
            num_lost_offd_per_thread[my_thread_num] += num_lost_offd;
            hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST);
            hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST);

         } /* end if (A_mxnum > max_row_elmts) */
      } /* end if (max_row_elmts > 0) */

      /* Sum up num_lost_global */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (my_thread_num == 0)
      {
         num_lost_global = 0;
         num_lost_global_offd = 0;
         for (i = 0; i < max_num_threads[0]; i++)
         {
            num_lost_global += num_lost_per_thread[i];
            num_lost_global_offd += num_lost_offd_per_thread[i];
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /*
       * Synchronize and create new diag data structures
       */
      if (num_lost_global)
      {
         /* Each thread has it's own locally compressed CSR matrix from rows start
          * to stop.  Now, we have to copy each thread's chunk into the new
          * process-wide CSR data structures
          *
          * First, we compute the new process-wide number of nonzeros (i.e.,
          * A_diag_size), and compute cum_lost_per_thread[k] so that this
          * entry holds the cumulative sum of entries dropped up to and
          * including thread k. */
         if (my_thread_num == 0)
         {
            A_diag_size = A_diag_i[n_fine];
            for (i = 0; i < max_num_threads[0]; i++)
            {
               A_diag_size -= num_lost_per_thread[i];
               if (i > 0)
               {
                  cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1];
               }
               else
               {
                  cum_lost_per_thread[i] = num_lost_per_thread[i];
               }
            }

            A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag);
            A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* points to next open spot in new data structures for this thread */
         if (my_thread_num == 0)
         {
            next_open = 0;
         }
         else
         {
            /* remember, cum_lost_per_thread[k] stores the num dropped up to and
             * including thread k */
            next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num-1];
         }

         /* copy the j and data arrays over */
         for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++)
         {
            A_diag_j_new[next_open] = A_diag_j[i];
            A_diag_data_new[next_open] = A_diag_data[i];
            next_open += 1;
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         /* update A_diag_i with number of dropped entries by all lower ranked
          * threads */
         if (my_thread_num > 0)
         {
            for (i=start; i<stop; i++)
            {
               A_diag_i[i] -= cum_lost_per_thread[my_thread_num-1];
            }
         }

         if (my_thread_num == 0)
         {
            /* Set last entry */
            A_diag_i[n_fine] = A_diag_size ;

            hypre_TFree(A_diag_j, memory_location_diag);
            hypre_TFree(A_diag_data, memory_location_diag);
            hypre_CSRMatrixJ(A_diag) = A_diag_j_new;
            hypre_CSRMatrixData(A_diag) = A_diag_data_new;
            hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size;
         }
      }

      /*
       * Synchronize and create new offd data structures
       */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      if (num_lost_global_offd)
      {
         /* Repeat process for off-diagonal */
         if (my_thread_num == 0)
         {
            A_offd_size = A_offd_i[n_fine];
            for (i = 0; i < max_num_threads[0]; i++)
            {
               A_offd_size -= num_lost_offd_per_thread[i];
               if (i > 0)
               {
                  cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1];
               }
               else
               {
                  cum_lost_per_thread[i] = num_lost_offd_per_thread[i];
               }
            }

            A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd);
            A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* points to next open spot in new data structures for this thread */
         if (my_thread_num == 0)
         {
            next_open = 0;
         }
         else
         {
            /* remember, cum_lost_per_thread[k] stores the num dropped up to and
             * including thread k */
            next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num-1];
         }

         /* copy the j and data arrays over */
         for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++)
         {
            A_offd_j_new[next_open] = A_offd_j[i];
            A_offd_data_new[next_open] = A_offd_data[i];
            next_open += 1;
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         /* update A_offd_i with number of dropped entries by all lower ranked
          * threads */
         if (my_thread_num > 0)
         {
            for (i=start; i<stop; i++)
            {
               A_offd_i[i] -= cum_lost_per_thread[my_thread_num-1];
            }
         }

         if (my_thread_num == 0)
         {
            /* Set last entry */
            A_offd_i[n_fine] = A_offd_size ;

            hypre_TFree(A_offd_j, memory_location_offd);
            hypre_TFree(A_offd_data, memory_location_offd);
            hypre_CSRMatrixJ(A_offd) = A_offd_j_new;
            hypre_CSRMatrixData(A_offd) = A_offd_data_new;
            hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size;
         }
      }
   } /* end parallel region */

   hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
   hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST);
   hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST);
   hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime();
#endif

   return ierr;
}
GB_unop__frexpx_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__frexpx_fp64_fp64) // op(A') function: GB (_unop_tran__frexpx_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = GB_frexpx (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_frexpx (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = GB_frexpx (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FREXPX || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__frexpx_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t 
p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = GB_frexpx (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = GB_frexpx (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__frexpx_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
sgemm.c
/*********************************************************************/ /* Copyright 2009, 2010 The University of Texas at Austin. */ /* All rights reserved. */ /* */ /* Redistribution and use in source and binary forms, with or */ /* without modification, are permitted provided that the following */ /* conditions are met: */ /* */ /* 1. Redistributions of source code must retain the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer. */ /* */ /* 2. Redistributions in binary form must reproduce the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer in the documentation and/or other materials */ /* provided with the distribution. */ /* */ /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ /* */ /* The views and conclusions contained in the software and */ /* documentation are those of the authors and should not be */ /* interpreted as representing official policies, either expressed */ /* or implied, of The University of Texas at Austin. 
*/
/*********************************************************************/

/* Modified by Marko Vitez for Purdue University */

/*
 * Blocked, multi-threaded single-precision GEMM derived from GotoBLAS.
 * Extra to a plain GEMM, when args->ks0 != 0 the A operand is read through
 * xtab/ytab index tables instead of directly — presumably an implicit
 * im2col lowering for convolution (TODO confirm against ../sgemm.h).
 */

#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <sched.h>
#include "blas.h"
#include "../sgemm.h"

#define CACHE_LINE_SIZE 8
#define DIVIDE_RATE 2

/* Architecture-dependent blocking sizes (P/Q/R) and kernel unroll factors,
 * plus the packing routines used for the A operand. */
#ifdef ARM

#define SWITCH_RATIO 2
#define GEMM_P 128
#define GEMM_Q 240
#define GEMM_R 12288
#define GEMM_UNROLL_M 4
#define GEMM_UNROLL_N 4
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) sgemm_otcopy(M, N, (float *)(A) + ((Y) + (X) * (LDA)), LDA, BUFFER)
#define ICOPYT_OPERATION(M, N, A, LDA, X, Y, BUFFER) sgemm_oncopy(M, N, (float *)(A) + ((X) + (Y) * (LDA)), LDA, BUFFER)

#else

#define SWITCH_RATIO 4
#if defined SANDYBRIDGE || defined HASWELL
#define GEMM_UNROLL_M 16
#define GEMM_P 768
#define GEMM_Q 384
#define GEMM_R 21056
#else
#define GEMM_UNROLL_M 8
#define GEMM_P 1024
#define GEMM_Q 512
#define GEMM_R 15328
#endif
#define GEMM_UNROLL_N 4
#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) sgemm_itcopy(M, N, (float *)(A) + ((Y) + (X) * (LDA)), LDA, BUFFER)
#define ICOPYT_OPERATION(M, N, A, LDA, X, Y, BUFFER) sgemm_incopy(M, N, (float *)(A) + ((X) + (Y) * (LDA)), LDA, BUFFER)

#endif // #ifdef ARM

/* Per-thread packing buffer: one A panel (P*Q) plus one B panel (R*Q),
 * rounded up to the GEMM_ALIGN boundary. */
#define BUFFER_SIZE (((GEMM_P + GEMM_R) * GEMM_Q * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)
#define GEMM_ALIGN 0x03fffUL
#define MIN(a,b) ((a)>(b) ? (b) : (a))

/* One entry per (owner thread, consumer thread, buffer half).  A nonzero
 * value is the address of a packed B panel ready for the consumer; the
 * consumer clears it when done.  CACHE_LINE_SIZE spacing avoids false
 * sharing between consumers. */
typedef struct {
	volatile long working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE];
} job_t;

/* Packing routines for the B operand (normal and transposed). */
#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) sgemm_oncopy(M, N, (float *)(A) + ((X) + (Y) * (LDA)), LDA, BUFFER)
#define OCOPYT_OPERATION(M, N, A, LDA, X, Y, BUFFER) sgemm_otcopy(M, N, (float *)(A) + ((Y) + (X) * (LDA)), LDA, BUFFER)

/* Inner kernel: C(X:,Y:) += ALPHA * SA * SB on the packed panels. */
#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) sgemm_kernel(M, N, K, ALPHA, SA, SB, (float *)(C) + ((X) + (Y) * LDC), LDC)

/* C(M_FROM:M_TO, N_FROM:N_TO) *= BETA (done once, before accumulation). */
#define BETA_OPERATION(M_FROM, M_TO, N_FROM, N_TO, BETA, C, LDC) sgemm_beta((M_TO) - (M_FROM), (N_TO - N_FROM), 0, \
	BETA, NULL, 0, NULL, 0, (float *)(C) + (M_FROM) + (N_FROM) * (LDC), LDC)

#ifdef TIMING
#define START_RPCC() rpcc_counter = rpcc()
#define STOP_RPCC(COUNTER) COUNTER += rpcc() - rpcc_counter
#else
#define START_RPCC()
#define STOP_RPCC(COUNTER)
#endif

/* WMB: store memory barrier (publishes packed panels before setting flags).
 * YIELDING: body of the spin-wait loops. */
#ifdef ARM
#define WMB __asm__ __volatile__ ("dmb ishst" : : : "memory")
#define YIELDING asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop; \n");
#else
#define WMB
#define YIELDING sched_yield()
#endif

int threads_num;                               /* set by blas_init() */
float *saa[MAX_CPU_NUMBER], *sba[MAX_CPU_NUMBER];  /* per-thread A/B packing buffers */
/* Index tables for the implicit-A (ks0) mode; allocated by calctabs(),
 * freed at the end of sgemmargs().  xtab is indexed by k, ytab by m. */
static struct { int x1, y1, plane; } *xtab;
static struct { int x1, y1; } *ytab;

/* Build xtab/ytab so that element (x, y) of the implicit A matrix can be
 * fetched by get_a_pad()/get_a_nopad().  ks0/ks1 look like kernel strides,
 * dH/dW like output steps, padH/padW like padding — TODO confirm field
 * semantics against struct sgemmargs in ../sgemm.h.
 * NOTE(review): both malloc() results are used unchecked. */
static void calctabs(struct sgemmargs *args)
{
	int x, y;

	xtab = malloc(sizeof(*xtab) * args->k);
	ytab = malloc(sizeof(*ytab) * args->m);
	for(x = 0; x < args->k; x++) {
		xtab[x].plane = x / args->ks0;
		int x1 = x % args->ks0;
		int y1 = x1 / args->ks1 - args->padH;
		x1 = x1 % args->ks1 - args->padW;
		xtab[x].y1 = y1 * args->is1;
		xtab[x].x1 = x1;
	}
	for(y = 0; y < args->m; y++) {
		int y1 = y / args->os1 * args->dH;
		int x1 = y % args->os1 * args->dW;
		ytab[y].y1 = y1 * args->is1;
		ytab[y].x1 = x1;
	}
}

/* Fetch implicit-A element (x, y), returning 0 outside the valid input
 * region (padding case). */
static float get_a_pad(struct sgemmargs *args, int x, int y)
{
	int x1 = xtab[x].x1 + ytab[y].x1;
	int y1 = xtab[x].y1 + ytab[y].y1;
	if(y1 < 0 || y1 >= args->is0 || x1 < 0 || x1 >= args->is1)
		return 0;
	return args->a[xtab[x].plane*args->is0 + y1 + x1];
}

/* Fetch implicit-A element (x, y); no bounds check (padW == padH == 0). */
static float get_a_nopad(struct sgemmargs *args, int x, int y)
{
	int x1 = xtab[x].x1 + ytab[y].x1;
	int y1 = xtab[x].y1 + ytab[y].y1;
	return args->a[xtab[x].plane*args->is0 + y1 + x1];
}

/*
 * icopy_operation_pad / icopy_operation_nopad: pack an m x n tile of the
 * implicit A matrix (starting at (x, y)) into buffer b, in the same packed
 * layout the ICOPY_OPERATION routines produce for an explicit matrix.  The
 * column-group width steps down from the kernel unroll (4 on ARM, 16 on
 * SANDYBRIDGE/HASWELL, 8 otherwise) through the smaller power-of-two widths
 * for the remainder columns.
 */
#ifdef ARM
static void icopy_operation_pad(int m, int n, struct sgemmargs *args, int x, int y, float *b)
{
	int i, i1, j, im;

	//printf("icopy (%d,%d)%d (%d,%d)\n", m, n, lda, x, y);
	for(j = 0; j + 3 < n; j += 4) {
		for(i = 0; i < m; i += 4) {
			im = m - i > 4 ? 4 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*4 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*4 + 1] = get_a_pad(args, x+i+i1, y+j+1);
				b[i1*4 + 2] = get_a_pad(args, x+i+i1, y+j+2);
				b[i1*4 + 3] = get_a_pad(args, x+i+i1, y+j+3);
			}
			b += im * 4;
		}
	}
	if(j + 1 < n) {
		for(i = 0; i < m; i += 4) {
			im = m - i > 4 ? 4 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*2 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*2 + 1] = get_a_pad(args, x+i+i1, y+j+1);
			}
			b += im * 2;
		}
		j += 2;
	}
	if(j < n) {
		for(i = 0; i < m; i += 4) {
			im = m - i > 4 ? 4 : m - i;
			for(i1 = 0; i1 < im; i1++)
				b[i1] = get_a_pad(args, x+i+i1, y+j);
			b += im;
		}
	}
}

static void icopy_operation_nopad(int m, int n, struct sgemmargs *args, int x, int y, float *b)
{
	int i, i1, j, im;

	//printf("icopy (%d,%d)%d (%d,%d)\n", m, n, lda, x, y);
	for(j = 0; j + 3 < n; j += 4) {
		for(i = 0; i < m; i += 4) {
			im = m - i > 4 ? 4 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*4 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*4 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
				b[i1*4 + 2] = get_a_nopad(args, x+i+i1, y+j+2);
				b[i1*4 + 3] = get_a_nopad(args, x+i+i1, y+j+3);
			}
			b += im * 4;
		}
	}
	if(j + 1 < n) {
		for(i = 0; i < m; i += 4) {
			im = m - i > 4 ? 4 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*2 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*2 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
			}
			b += im * 2;
		}
		j += 2;
	}
	if(j < n) {
		for(i = 0; i < m; i += 4) {
			im = m - i > 4 ? 4 : m - i;
			for(i1 = 0; i1 < im; i1++)
				b[i1] = get_a_nopad(args, x+i+i1, y+j);
			b += im;
		}
	}
}
#else
#if defined SANDYBRIDGE || defined HASWELL
static void icopy_operation_pad(int m, int n, struct sgemmargs *args, int x, int y, float *b)
{
	int i, i1, j, im;

	for(j = 0; j + 15 < n; j += 16) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*16 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*16 + 1] = get_a_pad(args, x+i+i1, y+j+1);
				b[i1*16 + 2] = get_a_pad(args, x+i+i1, y+j+2);
				b[i1*16 + 3] = get_a_pad(args, x+i+i1, y+j+3);
				b[i1*16 + 4] = get_a_pad(args, x+i+i1, y+j+4);
				b[i1*16 + 5] = get_a_pad(args, x+i+i1, y+j+5);
				b[i1*16 + 6] = get_a_pad(args, x+i+i1, y+j+6);
				b[i1*16 + 7] = get_a_pad(args, x+i+i1, y+j+7);
				b[i1*16 + 8] = get_a_pad(args, x+i+i1, y+j+8);
				b[i1*16 + 9] = get_a_pad(args, x+i+i1, y+j+9);
				b[i1*16 + 10] = get_a_pad(args, x+i+i1, y+j+10);
				b[i1*16 + 11] = get_a_pad(args, x+i+i1, y+j+11);
				b[i1*16 + 12] = get_a_pad(args, x+i+i1, y+j+12);
				b[i1*16 + 13] = get_a_pad(args, x+i+i1, y+j+13);
				b[i1*16 + 14] = get_a_pad(args, x+i+i1, y+j+14);
				b[i1*16 + 15] = get_a_pad(args, x+i+i1, y+j+15);
			}
			b += im * 16;
		}
	}
	if(j + 7 < n) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*8 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*8 + 1] = get_a_pad(args, x+i+i1, y+j+1);
				b[i1*8 + 2] = get_a_pad(args, x+i+i1, y+j+2);
				b[i1*8 + 3] = get_a_pad(args, x+i+i1, y+j+3);
				b[i1*8 + 4] = get_a_pad(args, x+i+i1, y+j+4);
				b[i1*8 + 5] = get_a_pad(args, x+i+i1, y+j+5);
				b[i1*8 + 6] = get_a_pad(args, x+i+i1, y+j+6);
				b[i1*8 + 7] = get_a_pad(args, x+i+i1, y+j+7);
			}
			b += im * 8;
		}
		j += 8;
	}
	if(j + 3 < n) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*4 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*4 + 1] = get_a_pad(args, x+i+i1, y+j+1);
				b[i1*4 + 2] = get_a_pad(args, x+i+i1, y+j+2);
				b[i1*4 + 3] = get_a_pad(args, x+i+i1, y+j+3);
			}
			b += im * 4;
		}
		j += 4;
	}
	if(j + 1 < n) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*2 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*2 + 1] = get_a_pad(args, x+i+i1, y+j+1);
			}
			b += im * 2;
		}
		j += 2;
	}
	if(j < n) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++)
				b[i1] = get_a_pad(args, x+i+i1, y+j);
			b += im;
		}
	}
}

static void icopy_operation_nopad(int m, int n, struct sgemmargs *args, int x, int y, float *b)
{
	int i, i1, j, im;

	for(j = 0; j + 15 < n; j += 16) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*16 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*16 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
				b[i1*16 + 2] = get_a_nopad(args, x+i+i1, y+j+2);
				b[i1*16 + 3] = get_a_nopad(args, x+i+i1, y+j+3);
				b[i1*16 + 4] = get_a_nopad(args, x+i+i1, y+j+4);
				b[i1*16 + 5] = get_a_nopad(args, x+i+i1, y+j+5);
				b[i1*16 + 6] = get_a_nopad(args, x+i+i1, y+j+6);
				b[i1*16 + 7] = get_a_nopad(args, x+i+i1, y+j+7);
				b[i1*16 + 8] = get_a_nopad(args, x+i+i1, y+j+8);
				b[i1*16 + 9] = get_a_nopad(args, x+i+i1, y+j+9);
				b[i1*16 + 10] = get_a_nopad(args, x+i+i1, y+j+10);
				b[i1*16 + 11] = get_a_nopad(args, x+i+i1, y+j+11);
				b[i1*16 + 12] = get_a_nopad(args, x+i+i1, y+j+12);
				b[i1*16 + 13] = get_a_nopad(args, x+i+i1, y+j+13);
				b[i1*16 + 14] = get_a_nopad(args, x+i+i1, y+j+14);
				b[i1*16 + 15] = get_a_nopad(args, x+i+i1, y+j+15);
			}
			b += im * 16;
		}
	}
	if(j + 7 < n) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*8 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*8 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
				b[i1*8 + 2] = get_a_nopad(args, x+i+i1, y+j+2);
				b[i1*8 + 3] = get_a_nopad(args, x+i+i1, y+j+3);
				b[i1*8 + 4] = get_a_nopad(args, x+i+i1, y+j+4);
				b[i1*8 + 5] = get_a_nopad(args, x+i+i1, y+j+5);
				b[i1*8 + 6] = get_a_nopad(args, x+i+i1, y+j+6);
				b[i1*8 + 7] = get_a_nopad(args, x+i+i1, y+j+7);
			}
			b += im * 8;
		}
		j += 8;
	}
	if(j + 3 < n) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*4 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*4 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
				b[i1*4 + 2] = get_a_nopad(args, x+i+i1, y+j+2);
				b[i1*4 + 3] = get_a_nopad(args, x+i+i1, y+j+3);
			}
			b += im * 4;
		}
		j += 4;
	}
	if(j + 1 < n) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*2 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*2 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
			}
			b += im * 2;
		}
		j += 2;
	}
	if(j < n) {
		for(i = 0; i < m; i += 16) {
			im = m - i > 16 ? 16 : m - i;
			for(i1 = 0; i1 < im; i1++)
				b[i1] = get_a_nopad(args, x+i+i1, y+j);
			b += im;
		}
	}
}
#else
static void icopy_operation_pad(int m, int n, struct sgemmargs *args, int x, int y, float *b)
{
	int i, i1, j, im;

	for(j = 0; j + 7 < n; j += 8) {
		for(i = 0; i < m; i += 8) {
			im = m - i > 8 ? 8 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*8 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*8 + 1] = get_a_pad(args, x+i+i1, y+j+1);
				b[i1*8 + 2] = get_a_pad(args, x+i+i1, y+j+2);
				b[i1*8 + 3] = get_a_pad(args, x+i+i1, y+j+3);
				b[i1*8 + 4] = get_a_pad(args, x+i+i1, y+j+4);
				b[i1*8 + 5] = get_a_pad(args, x+i+i1, y+j+5);
				b[i1*8 + 6] = get_a_pad(args, x+i+i1, y+j+6);
				b[i1*8 + 7] = get_a_pad(args, x+i+i1, y+j+7);
			}
			b += im * 8;
		}
	}
	if(j + 3 < n) {
		for(i = 0; i < m; i += 8) {
			im = m - i > 8 ? 8 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*4 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*4 + 1] = get_a_pad(args, x+i+i1, y+j+1);
				b[i1*4 + 2] = get_a_pad(args, x+i+i1, y+j+2);
				b[i1*4 + 3] = get_a_pad(args, x+i+i1, y+j+3);
			}
			b += im * 4;
		}
		j += 4;
	}
	if(j + 1 < n) {
		for(i = 0; i < m; i += 8) {
			im = m - i > 8 ? 8 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*2 + 0] = get_a_pad(args, x+i+i1, y+j);
				b[i1*2 + 1] = get_a_pad(args, x+i+i1, y+j+1);
			}
			b += im * 2;
		}
		j += 2;
	}
	if(j < n) {
		for(i = 0; i < m; i += 8) {
			im = m - i > 8 ? 8 : m - i;
			for(i1 = 0; i1 < im; i1++)
				b[i1] = get_a_pad(args, x+i+i1, y+j);
			b += im;
		}
	}
}

static void icopy_operation_nopad(int m, int n, struct sgemmargs *args, int x, int y, float *b)
{
	int i, i1, j, im;

	for(j = 0; j + 7 < n; j += 8) {
		for(i = 0; i < m; i += 8) {
			im = m - i > 8 ? 8 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*8 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*8 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
				b[i1*8 + 2] = get_a_nopad(args, x+i+i1, y+j+2);
				b[i1*8 + 3] = get_a_nopad(args, x+i+i1, y+j+3);
				b[i1*8 + 4] = get_a_nopad(args, x+i+i1, y+j+4);
				b[i1*8 + 5] = get_a_nopad(args, x+i+i1, y+j+5);
				b[i1*8 + 6] = get_a_nopad(args, x+i+i1, y+j+6);
				b[i1*8 + 7] = get_a_nopad(args, x+i+i1, y+j+7);
			}
			b += im * 8;
		}
	}
	if(j + 3 < n) {
		for(i = 0; i < m; i += 8) {
			im = m - i > 8 ? 8 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*4 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*4 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
				b[i1*4 + 2] = get_a_nopad(args, x+i+i1, y+j+2);
				b[i1*4 + 3] = get_a_nopad(args, x+i+i1, y+j+3);
			}
			b += im * 4;
		}
		j += 4;
	}
	if(j + 1 < n) {
		for(i = 0; i < m; i += 8) {
			im = m - i > 8 ? 8 : m - i;
			for(i1 = 0; i1 < im; i1++) {
				b[i1*2 + 0] = get_a_nopad(args, x+i+i1, y+j);
				b[i1*2 + 1] = get_a_nopad(args, x+i+i1, y+j+1);
			}
			b += im * 2;
		}
		j += 2;
	}
	if(j < n) {
		for(i = 0; i < m; i += 8) {
			im = m - i > 8 ? 8 : m - i;
			for(i1 = 0; i1 < im; i1++)
				b[i1] = get_a_nopad(args, x+i+i1, y+j);
			b += im;
		}
	}
}
#endif
#endif

/* Dispatch to the padded or unpadded implicit-A packing routine. */
static void icopy_operation(int m, int n, struct sgemmargs *args, int x, int y, float *b)
{
	if(args->padW || args->padH)
		icopy_operation_pad(m, n, args, x, y, b);
	else icopy_operation_nopad(m, n, args, x, y, b);
}

static job_t job[MAX_CPU_NUMBER];

/*
 * One thread's share of the parallel GEMM.  Each thread owns the row stripe
 * range_m[mypos]..range_m[mypos+1] of C and packs the column stripe
 * range_n[mypos]..range_n[mypos+1] of B; packed B panels are published to
 * the other threads through the job[].working flags (see job_t above), so
 * every thread multiplies its own A panel against every thread's B panels.
 */
static int gemm_thread(long mypos, long nthreads, struct sgemmargs *args, long *range_m, long *range_n)
{
	float *buffer[DIVIDE_RATE];
	float *sa, *sb;
	long m_from, m_to, n_from, n_to;
	long xxx, bufferside;
	long ls, min_l, jjs, min_jj;
	long is, min_i, div_n;
	long i, current;
	long l1stride;
	char transa = args->transa;
	char transb = args->transb;
	long m = args->m;
	long n = args->n;
	long k = args->k;
	float alpha = args->alpha;
	float beta = args->beta;
	float *a = args->a;
	float *b = args->b;
	float *c = args->c;
	long lda = args->lda;
	long ldb = args->ldb;
	long ldc = args->ldc;

#ifdef TIMING
	unsigned long rpcc_counter;
	unsigned long copy_A = 0;
	unsigned long copy_B = 0;
	unsigned long kernel = 0;
	unsigned long waiting1 = 0;
	unsigned long waiting2 = 0;
	unsigned long waiting3 = 0;
	unsigned long waiting6[MAX_CPU_NUMBER];
	unsigned long ops = 0;

	/* NOTE(review): 'num_threads' is not declared in this function
	 * ('nthreads'?) — this loop fails to compile when TIMING is defined. */
	for (i = 0; i < num_threads; i++) waiting6[i] = 0;
#endif

	sa = saa[mypos];
	sb = sba[mypos];
	m_from = 0;
	m_to = m;
	if(range_m) {
		m_from = range_m[mypos + 0];
		m_to = range_m[mypos + 1];
	}
	n_from = 0;
	n_to = n;
	if (range_n) {
		n_from = range_n[mypos + 0];
		n_to = range_n[mypos + 1];
	}
	/* Each thread scales its whole row stripe across all n columns. */
	if(beta != 1)
		BETA_OPERATION(m_from, m_to, 0, n, beta, c, ldc);
	if(k == 0 || alpha == 0)
		return 0;
#if 0
	fprintf(stderr, "Thread[%ld] m_from : %ld m_to : %ld n_from : %ld n_to : %ld\n", mypos, m_from, m_to, n_from, n_to);
	fprintf(stderr, "GEMM: P = %4ld Q = %4ld R = %4ld\n", (long)GEMM_P, (long)GEMM_Q, (long)GEMM_R);
#endif
	div_n = (n_to - n_from + DIVIDE_RATE - 1) / DIVIDE_RATE;
	buffer[0] = sb;
	for (i = 1; i < DIVIDE_RATE; i++)
		buffer[i] = buffer[i - 1] + GEMM_Q * ((div_n + GEMM_UNROLL_N - 1) & ~(GEMM_UNROLL_N - 1));
	/* Outer loop over K blocks of (at most) GEMM_Q. */
	for(ls = 0; ls < k; ls += min_l) {
		min_l = k - ls;
		if (min_l >= GEMM_Q * 2)
			min_l = GEMM_Q;
		else if (min_l > GEMM_Q)
			min_l = (min_l + 1) / 2;
		l1stride = 1;
		min_i = m_to - m_from;
		if (min_i >= GEMM_P * 2)
			min_i = GEMM_P;
		else if(min_i > GEMM_P)
			min_i = (min_i / 2 + GEMM_UNROLL_M - 1) & ~(GEMM_UNROLL_M - 1);
		else if (nthreads == 1)
			l1stride = 0;
		START_RPCC();
		//printf("icopy%ld (%ld,%ld)%ld (%ld,%ld)\n", mypos, min_l, min_i, lda, ls, m_from);
		/* Pack this thread's first A panel. */
		if(transa)
			ICOPYT_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);
		else if(args->ks0)
			icopy_operation(min_l, min_i, args, ls, m_from, sa);
		else ICOPY_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);
		STOP_RPCC(copy_A);
		div_n = (n_to - n_from + DIVIDE_RATE - 1) / DIVIDE_RATE;
		/* Pack this thread's B stripe (in DIVIDE_RATE halves), multiply it
		 * against the local A panel, then publish it to all threads. */
		for (xxx = n_from, bufferside = 0; xxx < n_to; xxx += div_n, bufferside ++) {
			START_RPCC();
			/* Make sure if no one is using buffer */
			for (i = 0; i < nthreads; i++)
				while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;}
			STOP_RPCC(waiting1);
			for(jjs = xxx; jjs < MIN(n_to, xxx + div_n); jjs += min_jj) {
				min_jj = MIN(n_to, xxx + div_n) - jjs;
				if(min_jj >= 3*GEMM_UNROLL_N)
					min_jj = 3*GEMM_UNROLL_N;
				else if (min_jj > GEMM_UNROLL_N)
					min_jj = GEMM_UNROLL_N;
				START_RPCC();
				if(transb)
					OCOPYT_OPERATION(min_l, min_jj, b, ldb, ls, jjs, buffer[bufferside] + min_l * (jjs - xxx) * l1stride);
				else OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs, buffer[bufferside] + min_l * (jjs - xxx) * l1stride);
				STOP_RPCC(copy_B);
				START_RPCC();
				KERNEL_OPERATION(min_i, min_jj, min_l, alpha, sa, buffer[bufferside] + min_l * (jjs - xxx) * l1stride, c, ldc, m_from, jjs);
				STOP_RPCC(kernel);
#ifdef TIMING
				ops += 2 * min_i * min_jj * min_l;
#endif
			}
			/* Publish: nonzero flag == address of the ready panel. */
			for (i = 0; i < nthreads; i++)
				job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (long)buffer[bufferside];
			WMB;
		}
		/* Consume the other threads' B panels with the local A panel. */
		current = mypos;
		do {
			current ++;
			if(current >= nthreads) current = 0;
			div_n = (range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE;
			for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) {
				if (current != mypos) {
					START_RPCC();
					/* thread has to wait */
					while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;}
					STOP_RPCC(waiting2);
					START_RPCC();
					KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), min_l, alpha, sa,
						(float *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], c, ldc, m_from, xxx);
					STOP_RPCC(kernel);
#ifdef TIMING
					ops += 2 * min_i * MIN(range_n[current + 1] - xxx, div_n) * min_l;
#endif
				}
				/* If the whole row stripe fit in one A panel, release now. */
				if (m_to - m_from == min_i)
					job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
			}
		} while (current != mypos);
		/* Remaining A panels of this thread's row stripe. */
		for(is = m_from + min_i; is < m_to; is += min_i) {
			min_i = m_to - is;
			if (min_i >= GEMM_P * 2)
				min_i = GEMM_P;
			else if (min_i > GEMM_P)
				min_i = ((min_i + 1) / 2 + GEMM_UNROLL_M - 1) & ~(GEMM_UNROLL_M - 1);
			START_RPCC();
			//printf("icopya%ld (%ld,%ld)%ld (%ld,%ld)\n", mypos, min_l, min_i, lda, ls, is);
			if(transa)
				ICOPYT_OPERATION(min_l, min_i, a, lda, ls, is, sa);
			else if(args->ks0)
				icopy_operation(min_l, min_i, args, ls, is, sa);
			else ICOPY_OPERATION(min_l, min_i, a, lda, ls, is, sa);
			STOP_RPCC(copy_A);
			current = mypos;
			do {
				div_n = (range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE;
				for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) {
					START_RPCC();
					KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), min_l, alpha, sa,
						(float *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], c, ldc, is, xxx);
					STOP_RPCC(kernel);
#ifdef TIMING
					ops += 2 * min_i * MIN(range_n[current + 1] - xxx, div_n) * min_l;
#endif
					if(is + min_i >= m_to) {
						/* Thread doesn't need this buffer any more */
						job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
						WMB;
					}
				}
				current ++;
				if(current >= nthreads) current = 0;
			} while (current != mypos);
		}
	}
	/* Wait until every consumer has released this thread's panels. */
	START_RPCC();
	for (i = 0; i < nthreads; i++)
		for (xxx = 0; xxx < DIVIDE_RATE; xxx++)
			while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {YIELDING;}
	STOP_RPCC(waiting3);
#ifdef TIMING
	long waiting = waiting1 + waiting2 + waiting3;
	long total = copy_A + copy_B + kernel + waiting;

	fprintf(stderr, "GEMM [%2ld] Copy_A : %6.2f Copy_B : %6.2f Wait1 : %6.2f Wait2 : %6.2f Wait3 : %6.2f Kernel : %6.2f",
		mypos, (double)copy_A /(double)total * 100., (double)copy_B /(double)total * 100.,
		(double)waiting1 /(double)total * 100.,
		(double)waiting2 /(double)total * 100.,
		(double)waiting3 /(double)total * 100.,
		(double)ops/(double)kernel / 4. * 100.);
#if 0
	fprintf(stderr, "GEMM [%2ld] Copy_A : %6.2ld Copy_B : %6.2ld Wait : %6.2ld\n",
		mypos, copy_A, copy_B, waiting);
	fprintf(stderr, "Waiting[%2ld] %6.2f %6.2f %6.2f\n",
		mypos,
		(double)waiting1/(double)waiting * 100.,
		(double)waiting2/(double)waiting * 100.,
		(double)waiting3/(double)waiting * 100.);
#endif
	fprintf(stderr, "\n");
#endif
	return 0;
}

/*
 * Single-threaded blocked GEMM over the full problem (also used as the
 * per-thread body when the caller is already inside an OpenMP parallel
 * region).  mypos selects which packing buffer pair to use.
 */
static int gemm_single(int mypos, struct sgemmargs *args)
{
	long m_from, m_to, n_from, n_to;
	long ls, is, js;
	long min_l, min_i, min_j;
	long jjs, min_jj;
	float *sa = saa[mypos];
	float *sb = sba[mypos];
	long l1stride, gemm_p, l2size;
	char transa = args->transa;
	char transb = args->transb;
	long m = args->m;
	long n = args->n;
	long k = args->k;
	float alpha = args->alpha;
	float beta = args->beta;
	float *a = args->a;
	float *b = args->b;
	float *c = args->c;
	long lda = args->lda;
	long ldb = args->ldb;
	long ldc = args->ldc;

#ifdef TIMING
	unsigned long long rpcc_counter;
	unsigned long long innercost = 0;
	unsigned long long outercost = 0;
	unsigned long long kernelcost = 0;
	double total;
#endif

	m_from = 0;
	m_to = m;
	n_from = 0;
	n_to = n;
	if (beta != 1)
		BETA_OPERATION(m_from, m_to, n_from, n_to, beta, c, ldc);
	if((k == 0) || (alpha == 0))
		return 0;
	l2size = GEMM_P * GEMM_Q;
#if 0
	fprintf(stderr, "GEMM(Single): M_from : %ld M_to : %ld N_from : %ld N_to : %ld k : %ld\n", m_from, m_to, n_from, n_to, k);
	fprintf(stderr, "GEMM(Single):: P = %4ld Q = %4ld R = %4ld\n", (long)GEMM_P, (long)GEMM_Q, (long)GEMM_R);
	// fprintf(stderr, "GEMM: SA .. %p SB .. %p\n", sa, sb);
	// fprintf(stderr, "A = %p B = %p C = %p\n\tlda = %ld ldb = %ld ldc = %ld\n", a, b, c, lda, ldb, ldc);
#endif
#ifdef TIMING
	innercost = 0;
	outercost = 0;
	kernelcost = 0;
#endif
	/* js: N blocks of GEMM_R; ls: K blocks of ~GEMM_Q; is: M blocks of
	 * ~GEMM_P — classic GotoBLAS loop order. */
	for(js = n_from; js < n_to; js += GEMM_R) {
		min_j = n_to - js;
		if (min_j > GEMM_R)
			min_j = GEMM_R;
		for(ls = 0; ls < k; ls += min_l) {
			min_l = k - ls;
			if(min_l >= GEMM_Q * 2) {
				gemm_p = GEMM_P;
				min_l = GEMM_Q;
			} else {
				if(min_l > GEMM_Q)
					min_l = (min_l / 2 + GEMM_UNROLL_M - 1) & ~(GEMM_UNROLL_M - 1);
				gemm_p = ((l2size / min_l + GEMM_UNROLL_M - 1) & ~(GEMM_UNROLL_M - 1));
				while (gemm_p * min_l > l2size)
					gemm_p -= GEMM_UNROLL_M;
			}
			/* First, we have to move data A to L2 cache */
			min_i = m_to - m_from;
			l1stride = 1;
			if(min_i >= GEMM_P * 2)
				min_i = GEMM_P;
			else if(min_i > GEMM_P)
				min_i = (min_i / 2 + GEMM_UNROLL_M - 1) & ~(GEMM_UNROLL_M - 1);
			else l1stride = 0;
			START_RPCC();
			if(transa)
				ICOPYT_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);
			else if(args->ks0)
				icopy_operation(min_l, min_i, args, ls, m_from, sa);
			else ICOPY_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);
			STOP_RPCC(innercost);
			/* Pack B in L1-sized slivers and multiply against the first
			 * A panel as each sliver lands. */
			for(jjs = js; jjs < js + min_j; jjs += min_jj) {
				min_jj = min_j + js - jjs;
				if(min_jj >= 3*GEMM_UNROLL_N)
					min_jj = 3*GEMM_UNROLL_N;
				else if(min_jj > GEMM_UNROLL_N)
					min_jj = GEMM_UNROLL_N;
				START_RPCC();
				if(transb)
					OCOPYT_OPERATION(min_l, min_jj, b, ldb, ls, jjs, sb + min_l * (jjs - js) * l1stride);
				else OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs, sb + min_l * (jjs - js) * l1stride);
				STOP_RPCC(outercost);
				START_RPCC();
				KERNEL_OPERATION(min_i, min_jj, min_l, alpha, sa, sb + min_l * (jjs - js) * l1stride, c, ldc, m_from, jjs);
				STOP_RPCC(kernelcost);
			}
			/* Remaining A panels reuse the already-packed B block. */
			for(is = m_from + min_i; is < m_to; is += min_i) {
				min_i = m_to - is;
				if(min_i >= GEMM_P * 2)
					min_i = GEMM_P;
				else if(min_i > GEMM_P)
					min_i = (min_i / 2 + GEMM_UNROLL_M - 1) & ~(GEMM_UNROLL_M - 1);
				START_RPCC();
				if(transa)
					ICOPYT_OPERATION(min_l, min_i, a, lda, ls, is, sa);
				else if(args->ks0)
					icopy_operation(min_l, min_i, args, ls, is, sa);
				else ICOPY_OPERATION(min_l, min_i, a, lda, ls, is, sa);
				STOP_RPCC(innercost);
				START_RPCC();
				KERNEL_OPERATION(min_i, min_j, min_l, alpha, sa, sb, c, ldc, is, js);
				STOP_RPCC(kernelcost);
			} /* end of is */
		} /* end of ls */
	} /* end of js */
#ifdef TIMING
	total = (double)outercost + (double)innercost + (double)kernelcost;
	printf( "Copy A : %5.2f Copy B: %5.2f Kernel : %5.2f kernel Effi. : %5.2f Total Effi. : %5.2f\n",
		innercost / total * 100., outercost / total * 100.,
		kernelcost / total * 100.,
		(double)(m_to - m_from) * (double)(n_to - n_from) * (double)k / (double)kernelcost * 100. / 2.,
		(double)(m_to - m_from) * (double)(n_to - n_from) * (double)k / total * 100. / 2.);
#endif
	return 0;
}

/*
 * Top-level driver.  Three execution modes:
 *  - already inside an OpenMP parallel region: every thread runs
 *    gemm_single() on the shared args (thread 0 builds/frees the tables);
 *  - small problem (< threads_num * SWITCH_RATIO in m or n): serial;
 *  - otherwise: split m across threads (range_M) and, per GEMM_R-sized N
 *    super-block, split n (range_N) and run gemm_thread() in parallel.
 */
void sgemmargs(struct sgemmargs *args)
{
	long range_M[MAX_CPU_NUMBER + 1];
	long range_N[MAX_CPU_NUMBER + 1];
	long num_cpu_m, num_cpu_n;
	long width, i, j, k1, js;
	long m1, n1, n_from, n_to;

	range_M[0] = 0;
	m1 = args->m;
	num_cpu_m = 0;
	if(omp_in_parallel()) {
		if(args->ks0 && omp_get_thread_num() == 0)
			calctabs(args);
#pragma omp barrier
		gemm_single(omp_get_thread_num(), args);
#pragma omp barrier
		if(args->ks0 && omp_get_thread_num() == 0) {
			free(xtab);
			free(ytab);
		}
		return;
	}
	if(args->ks0)
		calctabs(args);
	if((args->m < threads_num * SWITCH_RATIO) || (args->n < threads_num * SWITCH_RATIO)) {
		gemm_single(0, args);
		if(args->ks0) {
			free(xtab);
			free(ytab);
		}
		return;
	}
	/* Spread m over all threads_num threads as evenly as possible; the
	 * loop terminates with num_cpu_m == threads_num. */
	while(m1 > 0) {
		width = (m1 + threads_num - num_cpu_m - 1) / (threads_num - num_cpu_m);
		m1 -= width;
		if(m1 < 0) width = width + m1;
		range_M[num_cpu_m + 1] = range_M[num_cpu_m] + width;
		num_cpu_m++;
	}
	n_from = 0;
	n_to = args->n;
	for(js = n_from; js < n_to; js += GEMM_R * threads_num) {
		n1 = n_to - js;
		if (n1 > GEMM_R * threads_num)
			n1 = GEMM_R * threads_num;
		range_N[0] = js;
		num_cpu_n = 0;
		while (n1 > 0) {
			width = (n1 + threads_num - num_cpu_n - 1) / (threads_num - num_cpu_n);
			n1 -= width;
			if(n1 < 0) width = width + n1;
			range_N[num_cpu_n + 1] = range_N[num_cpu_n] + width;
			num_cpu_n++;
		}
		/* Clear all panel-ready flags before launching the workers. */
		for (j = 0; j < num_cpu_m; j++)
			for (i = 0; i < num_cpu_m; i++)
				for (k1 = 0; k1 < DIVIDE_RATE; k1++)
					job[j].working[i][CACHE_LINE_SIZE * k1] = 0;
#pragma omp parallel for
		for(i = 0; i < threads_num; i++)
			gemm_thread(i, threads_num, args, range_M, range_N);
	}
	if(args->ks0) {
		free(xtab);
		free(ytab);
	}
}

/* BLAS-style entry point: C = alpha*op(A)*op(B) + beta*C (column-major),
 * where op(X) = X' when the corresponding trans flag is 't'. */
void sgemm(char transa, char transb, long m, long n, long k, float alpha, float *a, long lda,
	float *b, long ldb, float beta, float *c, long ldc)
{
	struct sgemmargs args;

	args.transa = transa == 't';
	args.transb = transb == 't';
	args.m = m;
	args.n = n;
	args.k = k;
	args.alpha = alpha;
	args.a = a;
	args.lda = lda;
	args.b = b;
	args.ldb = ldb;
	args.beta = beta;
	args.c = c;
	args.ldc = ldc;
	args.ks0 = 0;
	sgemmargs(&args);
}

/* Allocate the per-thread packing buffers; must be called before any sgemm.
 * The B buffer (sba) starts at the aligned offset past the A panel area.
 * NOTE(review): malloc() result is used unchecked. */
void blas_init()
{
	int i;

	if(!threads_num)
		threads_num = omp_get_max_threads();
	if(threads_num > MAX_CPU_NUMBER)
		threads_num = MAX_CPU_NUMBER;
	for(i = 0; i < threads_num; i++) {
		saa[i] = malloc(BUFFER_SIZE);
		sba[i] = (float *)((char *)saa[i] + ((GEMM_P * GEMM_Q * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN));
	}
}
GB_binop__eq_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint8) // A*D function (colscale): GB (_AxD__eq_uint8) // D*A function (rowscale): GB (_DxB__eq_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint8) // C=scalar+B GB (_bind1st__eq_uint8) // C=scalar+B' GB (_bind1st_tran__eq_uint8) // C=A+scalar GB (_bind2nd__eq_uint8) // C=A'+scalar GB (_bind2nd_tran__eq_uint8) // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT8 || GxB_NO_EQ_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const 
int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
yolov2_forward_network_quantized_origin.c
#include "additionally.h"    // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
                             // softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h

#define GEMMCONV

//#define SSE41
//#undef AVX

// Saturation limits for signed fixed-point values of each width
// (magnitude bits + 1 sign bit).
#define MAX_VAL_8 (256/2 - 1)          // 7-bit magnitude: 127
#define MAX_VAL_16 (256*256/2 - 1)     // 15-bit magnitude: 32767
// BUG FIX: the former spelling (256*256*256*256/2 - 1) overflows a 32-bit
// int inside the constant expression (256^4 == 2^32 does not fit), which is
// undefined behavior and in practice evaluated to -1, breaking the clamp in
// gemm_nn_int8_int32. 2147483647 == 2^31 - 1 is the intended value.
#define MAX_VAL_32 2147483647          // 31-bit magnitude: INT32_MAX

// Clamp src into the symmetric range [-max_val, +max_val].
int max_abs(int src, int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// Same clamp for 16-bit values.
short int max_abs_short(short int src, short int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// im2col.c
// Fetch one input pixel for im2col, honoring zero padding: coordinates that
// fall into the padded border yield 0.
int8_t im2col_get_pixel_int8(int8_t *im, int height, int width, int channels,
    int row, int col, int channel, int pad)
{
    row -= pad;
    col -= pad;

    if (row < 0 || col < 0 || row >= height || col >= width) return 0;
    return im[col + width*(row + height*channel)];
}

// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE void im2col_cpu_int8(int8_t* data_im, int channels, int height, int width, int ksize, int stride, int pad, int8_t* data_col) { int c, h, w; int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int channels_col = channels * ksize * ksize; for (c = 0; c < channels_col; ++c) { int w_offset = c % ksize; int h_offset = (c / ksize) % ksize; int c_im = c / ksize / ksize; for (h = 0; h < height_col; ++h) { for (w = 0; w < width_col; ++w) { int im_row = h_offset + h * stride; int im_col = w_offset + w * stride; int col_index = (c * height_col + h) * width_col + w; data_col[col_index] = im2col_get_pixel_int8(data_im, height, width, channels, im_row, im_col, c_im, pad); } } } } void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA, int8_t *A, int lda, int8_t *B, int ldb, int16_t *C, int ldc) { int32_t *c_tmp = calloc(N, sizeof(int32_t)); int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register int16_t A_PART = ALPHA*A[i*lda + k]; //#pragma simd parallel for for (j = 0; j < N; ++j) { c_tmp[j] += A_PART*B[k*ldb + j]; } } for (j = 0; j < N; ++j) { C[i*ldc + j] += max_abs(c_tmp[j], MAX_VAL_16); c_tmp[j] = 0; } } free(c_tmp); } void gemm_nn_int8_int32(int M, int N, int K, int8_t ALPHA, int8_t *A, int lda, int8_t *B, int ldb, int32_t *C, int ldc) { int32_t *c_tmp = calloc(N, sizeof(int32_t)); int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register int16_t A_PART = ALPHA*A[i*lda + k]; //#pragma simd parallel for for (j = 0; j < N; ++j) { c_tmp[j] += A_PART*B[k*ldb + j]; } } for (j = 0; j < N; ++j) { C[i*ldc + j] += max_abs(c_tmp[j], MAX_VAL_32); c_tmp[j] = 0; } } free(c_tmp); } void forward_convolutional_layer_q(layer l, network_state state) { int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1 int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width 
for stride=1 and pad=1 int i, j; int const out_size = out_h*out_w; typedef int16_t conv_t; // l.output conv_t *output_q = calloc(l.outputs, sizeof(conv_t)); state.input_int8 = (int8_t *)calloc(l.inputs, sizeof(int)); int z; for (z = 0; z < l.inputs; ++z) { int16_t src = state.input[z] * l.input_quant_multiplier; state.input_int8[z] = max_abs(src, MAX_VAL_8); } // Convolution int m = l.n; int k = l.size*l.size*l.c; int n = out_h*out_w; int8_t *a = l.weights_int8; int8_t *b = (int8_t *)state.workspace; conv_t *c = output_q; // int16_t // Use GEMM (as part of BLAS) im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b); int t; // multi-thread gemm #pragma omp parallel for for (t = 0; t < m; ++t) { gemm_nn_int8_int16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n); } free(state.input_int8); // Bias addition int fil; for (fil = 0; fil < l.n; ++fil) { for (j = 0; j < out_size; ++j) { output_q[fil*out_size + j] = output_q[fil*out_size + j] + l.biases_quant[fil]; } } // Activation if (l.activation == LEAKY) { for (i = 0; i < l.n*out_size; ++i) { output_q[i] = (output_q[i] > 0) ? 
output_q[i] : output_q[i] / 10; } } // De-scaling float ALPHA1 = 1 / (l.input_quant_multiplier * l.weights_quant_multiplier); for (i = 0; i < l.outputs; ++i) { l.output[i] = output_q[i] * ALPHA1; } // saving l.output == next layer input // int abc; // char outfile[30]; // sprintf(outfile, "weights/CONV%d_OUT.txt",l.index); // FILE *fp_out = fopen(outfile, "w"); // for (abc = 0; abc < out_size * l.n; abc++){ // fprintf(fp_out, "%x", l.output[abc]); // } free(output_q); } void yolov2_forward_network_q(network net, network_state state) { state.workspace = net.workspace; int i; for (i = 0; i < net.n; ++i) { state.index = i; layer l = net.layers[i]; if (l.type == CONVOLUTIONAL) { forward_convolutional_layer_q(l, state); } else if (l.type == MAXPOOL) { forward_maxpool_layer_cpu(l, state); } else if (l.type == ROUTE) { forward_route_layer_cpu(l, state); } else if (l.type == REORG) { forward_reorg_layer_cpu(l, state); } else if (l.type == UPSAMPLE) { forward_upsample_layer_cpu(l, state); } else if (l.type == SHORTCUT) { forward_shortcut_layer_cpu(l, state); } else if (l.type == YOLO) { forward_yolo_layer_cpu(l, state); } else if (l.type == REGION) { forward_region_layer_cpu(l, state); } else { printf("\n layer: %d \n", l.type); } state.input = l.output; // Saving output // int out_h = (l.h + 2 * l.pad - l.size) + 1; // int out_w = (l.w + 2 * l.pad - l.size) + 1; // int output_size = out_h * out_w; // int k; // char outfile[30]; // sprintf(outfile, "weights/CONV%d_OUT.txt",i); // FILE *fp_out = fopen(outfile, "w"); // for (k = 0; k < output_size; k++){ // fprintf(fp_out, "%x\n", l.output[k]); // } } } // detect on CPU float *network_predict_quantized(network net, float *input) { network_state state; state.net = net; state.index = 0; state.input = input; state.truth = 0; state.train = 0; state.delta = 0; yolov2_forward_network_q(net, state); // network on CPU //float *out = get_network_output(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; 
return net.layers[i].output; } //mean function for z-norm float _mean(float *weights, int filter_num, int weight_size){ float sum = 0; float mean; for (int i = 0; i < weight_size; i++){ sum += weights[filter_num * weight_size + i]; } mean = sum / weight_size; return mean; } // variance function for z-norm float _variance(float *weights, int filter_num, float mean, int weight_size){ float deviation_square_sum = 0; float variance; for (int i=0; i < weight_size; i++){ deviation_square_sum += pow(weights[filter_num * weight_size + i] - mean, 2); } variance = deviation_square_sum / weight_size; return variance; } // standard deviation function for z-norm float _std_deviation(float variance){ float std_deviation = sqrt(variance); return std_deviation; } // get min-max from weights float get_minmax(layer *l, int weights_size){ float max = 0; float min = 0; for (int i = 0; i < weights_size; i++){ if (max < l->weights[i]) max = l->weights[i]; if (min > l->weights[i]) min = l->weights[i]; } printf("The value of minmax : %f\n", max - min); return max - min; } // get mean float get_mean(layer *l, int weights_size){ float sum = 0; for (int i=0; i<weights_size; i++){ sum += l->weights[i]; } return sum / weights_size; } //get variance float get_variance(layer *l, int weights_size){ float deviation_square_sum = 0; float variance; float mean = get_mean(l, weights_size); for (int i=0; i < weights_size; i++){ deviation_square_sum += pow(l->weights[i] - mean, 2); } variance = deviation_square_sum / weights_size; return variance; } // get std_deviation float get_std_deviation(layer *l, int weights_size){ return sqrt(get_variance(l, weights_size)); } // This function contaminates original weights. // Have to additional work for contamination problem. 
void do_normalization(layer *l, char *method){ // get copy of weights printf("check 2\n"); size_t const weights_size = l->size*l->size*l->c*l->n; size_t const filter_size = l->size*l->size*l->c; float copied_weights[weights_size]; int fil, i; // memcpy(copied_weights, l->weights, sizeof(l->weights)); printf("check 3\n"); for (fil = 0; fil < l->n; ++fil) { for (i = 0; i < filter_size; ++i) { copied_weights[fil*filter_size + i] = l->weights[fil*filter_size + i]; } } // debug handling part // printf("*** This is preview for weights ***\n"); // for (i=0; i<10; i++){ // printf("weight[%d] : [%f]\n", i, copied_weights[i]); // } // if method is minmax // 22.03.04 below minmax working distinguish of filter. if (strcmp(method, "minmax") == 0){ float norm_weight; int j; // per filter for (fil = 0; fil < l->n; ++fil) { float max = 0; float min = 0; // get min, max of weights for (i = 0; i < filter_size; ++i) { if (max < copied_weights[fil*filter_size + i]) max = copied_weights[fil*filter_size + i]; if (min > copied_weights[fil*filter_size + i]) min = copied_weights[fil*filter_size + i]; } printf("min, max : %f %f\n", min, max); // do min-max normalization for (i = 0; i < filter_size; ++i) { norm_weight = (copied_weights[fil*filter_size + i] - min) / (max - min); copied_weights[fil*filter_size + i] = norm_weight; } } // debug handling part // printf("*** This is preview for normalized weights ***\n"); // for (i=0; i<10; i++){ // printf("weight[%d] : [%f]\n", i, copied_weights[i]); // } // re-copy to origin weight // memcpy(l->weights, copied_weights, sizeof(l->weights)); for (fil = 0; fil < l->n; ++fil) { for (i = 0; i < filter_size; ++i) { l->weights[fil*filter_size + i] = copied_weights[fil*filter_size + i]; } } } // if method is znorm // 22.03.05 below code is about znorm if (strcmp(method, "znorm") == 0){ // per filter for (fil = 0; fil < l->n; ++fil) { // get mean, variance, std_deviation float mean = _mean(copied_weights, fil, filter_size); float variance = 
_variance(copied_weights, fil, mean, filter_size); float std_deviation = _std_deviation(variance); printf("[filter num : %d][mean : %f] [variance : %f] [standard deviation : %f]\n", fil, mean, variance, std_deviation); // do z-normalization for (i = 0; i < filter_size; ++i) { float norm_weight = (copied_weights[fil*filter_size + i] - mean) / std_deviation; copied_weights[fil*filter_size + i] = norm_weight; } } // re-copy to origin weight for (i = 0; i < weights_size; i++){ l->weights[i] = copied_weights[i]; } } } /* Quantization-related */ void do_quantization(network net) { int counter = 0; char* method = "znorm"; // minmax, znorm int j; for (j = 0; j < net.n; ++j) { layer *l = &net.layers[j]; /* TODO: implement quantization The implementation given below is a naive version of per-network quantization; implement your own quantization that minimizes the mAP degradation */ printf("\n"); if (l->type == CONVOLUTIONAL) { // Quantize conv layer only size_t const weights_size = l->size*l->size*l->c*l->n; size_t const filter_size = l->size*l->size*l->c; // float layer_std_weight[11] = {}; float layer_minmax_weight[11] = {256.032, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997, 92.29997}; // {1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1} => 64.16% // {1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1} => 64.06% // {1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1} => 64.19% // {1, 2, 4, 1, 1, 4, 1, 1, 1, 1, 1} => 64.22% // {1, 2, 2, 2, 1, 4, 1, 1, 1, 1, 1} => 64.28% // {1, 2, 2, 1, 1, 4, 1, 1, 1, 1, 1} => 64.52% // {1, 2, 2, 1, 1, 4, 4, 1, 1, 1, 1} => 65.28% // {1, 2, 2, 1, 1, 4, 2, 1, 1, 1, 1} => 65.92% // {1, 2, 2, 1, 1, 4, 3, 1, 1, 1, 1} => 66.54% // {1, 2, 2, 1, 1, 4, 3, 1, 1.5, 1, 1} => 66.75% // {1, 2, 2, 1, 1, 4, 3, 1, 1.5, 2, 1} => 67.23% // {2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 64% // {2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 65% // {3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 69.74% // {4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 70.35% // {4.0005, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} => 70.63% // 
below codes are for debugging // printf("%d\n", sizeof(l->weights)*weights_size); // printf("%d\n", sizeof(l->weights)); // printf("%d\n", sizeof(l->weights[0])); // printf("%d\n", sizeof(float)); int i, fil; // Input Scaling if (counter >= net.input_calibration_size) { printf(" Warning: CONV%d has no corresponding input_calibration parameter - default value 16 will be used;\n", j); } l->input_quant_multiplier = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 16; // Using 16 as input_calibration as default value // l->input_quant_multiplier = floor(l->input_quant_multiplier*pow(2,12))/pow(2,12); ++counter; printf("check 1\n"); // Weight Quantization // do_normalization(l, method); // for (fil = 0; fil < l->n; ++fil) { // for (i = 0; i < filter_size; ++i) { // float w = (l->weights[fil*filter_size + i] - 0.5) * 127; // Scale // l->weights_int8[fil*filter_size + i] = max_abs(w, MAX_VAL_8); // Clip // } // } // Below annotation is a pure-skeleton code. l->weights_quant_multiplier = 1 / get_minmax(l, weights_size) * layer_minmax_weight[counter - 1]; // l->weights_quant_multiplier = 1 / get_std_deviation(l, weights_size); // Arbitrarily set to 32; you should devise your own method to calculate the weight multiplier for (fil = 0; fil < l->n; ++fil) { for (i = 0; i < filter_size; ++i) { float w = l->weights[fil*filter_size + i] * l->weights_quant_multiplier; // Scale l->weights_int8[fil*filter_size + i] = max_abs(w, MAX_VAL_8); // Clip } } // Bias Quantization float biases_multiplier = (l->weights_quant_multiplier * l->input_quant_multiplier); for (fil = 0; fil < l->n; ++fil) { float b = l->biases[fil] * biases_multiplier; // Scale l->biases_quant[fil] = max_abs(b, MAX_VAL_16); // Clip } printf(" CONV%d multipliers: input %g, weights %g, bias %g \n", j, l->input_quant_multiplier, l->weights_quant_multiplier, biases_multiplier); } else { printf(" No quantization for layer %d (layer type: %d) \n", j, l->type); } } } // Save quantized weights, bias, 
// and scale.
// Dump each CONVOLUTIONAL layer to text files under weights/:
//   CONV<j>_W.txt      quantized int8 weights, 4 packed per line as hex
//   CONV<j>_B.txt      quantized int16 biases, 2 packed per line as hex
//   CONV<j>_ORIGIN.txt original float weights, one per line
//   CONV<j>_S.txt      the layer's input scale
void save_quantized_model(network net)
{
    int j;
    for (j = 0; j < net.n; ++j) {
        layer *l = &net.layers[j];
        if (l->type != CONVOLUTIONAL) continue;

        size_t const weights_size = (size_t)l->size*l->size*l->c*l->n;
        printf(" Saving quantized weights, bias, and scale for CONV%d \n", j);

        char weightfile[64];
        char biasfile[64];
        char scalefile[64];
        char origin_weightfile[64];
        // snprintf: the original used sprintf into 30-byte buffers, which
        // leaves no headroom for large layer indices.
        snprintf(weightfile, sizeof(weightfile), "weights/CONV%d_W.txt", j);
        snprintf(biasfile, sizeof(biasfile), "weights/CONV%d_B.txt", j);
        snprintf(scalefile, sizeof(scalefile), "weights/CONV%d_S.txt", j);
        snprintf(origin_weightfile, sizeof(origin_weightfile), "weights/CONV%d_ORIGIN.txt", j);

        size_t k;

        // original float weights, one per line (fopen was unchecked before)
        FILE *fp_ori = fopen(origin_weightfile, "w");
        if (!fp_ori) {
            fprintf(stderr, " Failed to open %s for writing \n", origin_weightfile);
            continue;
        }
        for (k = 0; k < weights_size; k++) {
            fprintf(fp_ori, "%f\n", l->weights[k]);
        }
        fclose(fp_ori);

        // quantized weights: 4 int8 values packed into 8 hex digits per line
        FILE *fp_w = fopen(weightfile, "w");
        if (!fp_w) {
            fprintf(stderr, " Failed to open %s for writing \n", weightfile);
            continue;
        }
        for (k = 0; k < weights_size; k = k + 4) {
            uint8_t first  = l->weights_int8[k];
            uint8_t second = k + 1 < weights_size ? l->weights_int8[k+1] : 0;
            uint8_t third  = k + 2 < weights_size ? l->weights_int8[k+2] : 0;
            uint8_t fourth = k + 3 < weights_size ? l->weights_int8[k+3] : 0;
            fprintf(fp_w, "%02x%02x%02x%02x\n", first, second, third, fourth);
        }
        fclose(fp_w);

        // quantized biases: 2 int16 values packed into 8 hex digits per line.
        // BUG FIX: the loop previously stepped by 4 while printing only 2
        // values per line, silently dropping half of the biases.
        FILE *fp_b = fopen(biasfile, "w");
        if (!fp_b) {
            fprintf(stderr, " Failed to open %s for writing \n", biasfile);
            continue;
        }
        for (k = 0; k < (size_t)l->n; k = k + 2) {
            uint16_t first  = l->biases_quant[k];
            uint16_t second = k + 1 < (size_t)l->n ? l->biases_quant[k+1] : 0;
            fprintf(fp_b, "%04x%04x\n", first, second);
        }
        fclose(fp_b);

        // input scale
        FILE *fp_s = fopen(scalefile, "w");
        if (!fp_s) {
            fprintf(stderr, " Failed to open %s for writing \n", scalefile);
            continue;
        }
        fprintf(fp_s, "%f\n", l->input_quant_multiplier);
        fclose(fp_s);
    }
}
omp_fill_taskqueue.c
// RUN: %libomp-compile && env KMP_ENABLE_TASK_THROTTLING=0 %libomp-run
// RUN: %libomp-compile && env KMP_ENABLE_TASK_THROTTLING=1 %libomp-run
// REQUIRES: !abt && !icc

#include<omp.h>
#include<stdlib.h>
#include<string.h>

/**
 * Test the task throttling behavior of the runtime.
 * Unless OMP_NUM_THREADS is 1, the master thread pushes tasks to its own tasks
 * queue until either of the following happens:
 * - the task queue is full, and it starts serializing tasks
 * - all tasks have been pushed, and it can begin execution
 * The idea is to create a huge number of tasks which execution are blocked
 * until the master thread comes to execute tasks (they need to be blocking,
 * otherwise the second thread will start emptying the queue).
 * At this point we can check the number of enqueued tasks: iff all tasks have
 * been enqueued, then there was no task throttling.
 * Otherwise there has been some sort of task throttling.
 * If what we detect doesn't match the value of the environment variable, the
 * test is failed.
 */

#define NUM_TASKS 2000

int main()
{
  int i;
  int block = 1;
  // BUG FIX: getenv() returns NULL when the variable is unset (e.g. when the
  // binary is run outside the lit harness), and strcmp(NULL, ...) is
  // undefined behavior. Treat an unset variable as "throttling disabled".
  const char *env = getenv("KMP_ENABLE_TASK_THROTTLING");
  int throttling = env != NULL && strcmp(env, "1") == 0;
  int enqueued = 0;
  int failed = -1;

  #pragma omp parallel num_threads(2)
  #pragma omp master
  {
    for (i = 0; i < NUM_TASKS; i++) {
      enqueued++;
      #pragma omp task
      {
        int tid;
        tid = omp_get_thread_num();
        if (tid == 0) {
          // As soon as the master thread starts executing task we should unlock
          // all tasks, and detect the test failure if it has not been done yet.
          if (failed < 0)
            failed = throttling ? enqueued == NUM_TASKS : enqueued < NUM_TASKS;
          #pragma omp atomic write
          block = 0;
        }
        // Spin until the master thread releases all blocked tasks.
        int wait = 0;
        do {
          #pragma omp atomic read
          wait = block;
        } while (wait);
      }
    }
    block = 0;
  }

  return failed;
}
dpbsv.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpbsv.c, normal z -> d, Fri Sep 28 17:38:08 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_pbsv * * Computes the solution to a system of linear equations A * X = B, * where A is an n-by-n symmetric positive definite band matrix, and X and B * are n-by-nrhs matrices. The Cholesky decomposition is used to factor A as * * \f[ A = L\times L^T, \f] if uplo = PlasmaLower, * or * \f[ A = U^T\times U, \f] if uplo = PlasmaUpper, * * where U is an upper triangular matrix and L is a lower triangular matrix. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The number of linear equations, i.e., the order of the matrix A. * n >= 0. * * @param[in] kd * The number of subdiagonals within the band of A if uplo=upper. * The number of suuperdiagonals within the band of A. ku >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of columns * of the matrix B. nrhs >= 0. * * @param[in,out] AB * On entry, the upper or lower triangle of the symmetric band * matrix A, stored in the first KD+1 rows of the array. The * j-th column of A is stored in the j-th column of the array AB * as follows: * if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd) <= i <= j; * if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j <= i <= min(n,j+kd). 
* \n * On exit, if INFO = 0, the triangular factor U or L from the * Cholesky factorization A = U^T*U or A = L*L^T of the band * matrix A, in the same storage format as A. * * @param[in] ldab * The leading dimension of the array AB. ldab >= max(1,n). * * @param[in,out] B * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,n). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * @retval > 0 if i, the leading minor of order i of A is not * positive definite, so the factorization could not * be completed, and the solution has not been computed. * ******************************************************************************* * * @sa plasma_omp_dpbsv * @sa plasma_cpbsv * @sa plasma_dpbsv * @sa plasma_spbsv * ******************************************************************************/ int plasma_dpbsv(plasma_enum_t uplo, int n, int kd, int nrhs, double *pAB, int ldab, double *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (kd < 0) { plasma_error("illegal value of kd"); return -3; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -4; } if (ldab < kd+1) { plasma_error("illegal value of ldab"); return -6; } if (ldb < imax(1, n)) { plasma_error("illegal value of ldb"); return -8; } // quick return if (imin(n, nrhs) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_pbtrf(plasma, PlasmaRealDouble, n); // Set tiling parameters. 
int nb = plasma->nb; // Initialize tile matrix descriptors. int lm = nb*(1+(kd+nb-1)/nb); plasma_desc_t AB; plasma_desc_t B; int retval; retval = plasma_desc_general_band_create(PlasmaRealDouble, uplo, nb, nb, lm, n, 0, 0, n, n, kd, kd, &AB); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_band_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb, ldb, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&AB); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_dpb2desc(pAB, ldab, AB, &sequence, &request); plasma_omp_dge2desc(pB, ldb, B, &sequence, &request); // Call the tile async function. plasma_omp_dpbsv(uplo, AB, B, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_ddesc2pb(AB, pAB, ldab, &sequence, &request); plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&AB); plasma_desc_destroy(&B); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_pbsv * * Solves a symmetric positive definite band system of linear equations * using Cholesky factorization. * Non-blocking tile version of plasma_dpbsv(). * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. 
* ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in,out] AB * Descriptor of matrix A. * * @param[in,out] B * Descriptor of right-hand-sides B. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_dpbsv * @sa plasma_omp_cpbsv * @sa plasma_omp_dpbsv * @sa plasma_omp_spbsv * ******************************************************************************/ void plasma_omp_dpbsv(plasma_enum_t uplo, plasma_desc_t AB, plasma_desc_t B, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. 
if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return; } if (plasma_desc_check(AB) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (AB.n == 0 || B.n == 0) return; // Call the parallel functions. plasma_pdpbtrf(uplo, AB, sequence, request); plasma_pdtbsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, AB, B, NULL, sequence, request); plasma_pdtbsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, AB, B, NULL, sequence, request); }
atomic.c
#include <stdio.h>
#include <omp.h>

/* Shared accumulator; every thread contributes to it atomically. */
int sum = 0;

/*
 * OpenMP demo: three threads each atomically add 10 to the shared
 * counter, wait at a barrier until every update has landed, and then
 * each thread prints the final total (so "30" is printed three times).
 */
int main(int argc, char const *argv[])
{
#pragma omp parallel num_threads(3)
    {
        /* The atomic construct prevents a data race on the update. */
#pragma omp atomic
        sum = sum + 10;

        /* The barrier (with its implied flush) guarantees all three
           increments are visible before any thread reads the total. */
#pragma omp barrier
        {
            int total = sum;
            printf("%d\n", total);
        }
    }

    return 0;
}
composite.c
/*
  MagickCore Image Composite Methods

  Software Design
    John Cristy
    July 1992

  Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization
  dedicated to making software imaging solutions freely available.

  You may not use this file except in compliance with the License.  You may
  obtain a copy of the License at

    http://www.imagemagick.org/script/license.php

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/memory_.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/resample.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C o m p o s i t e I m a g e C h a n n e l                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompositeImageChannel() returns the second image composited onto the first
%  at the specified offset, using the specified composite method.
%
%  The format of the CompositeImageChannel method is:
%
%      MagickBooleanType CompositeImage(Image *image,
%        const CompositeOperator compose,Image *composite_image,
%        const ssize_t x_offset,const ssize_t y_offset)
%      MagickBooleanType CompositeImageChannel(Image *image,
%        const ChannelType channel,const CompositeOperator compose,
%        Image *composite_image,const ssize_t x_offset,const ssize_t y_offset)
%
%  A description of each parameter follows:
%
%    o image: the destination image, modified by the composition
%
%    o channel: the channel.
%
%    o compose: This operator affects how the composite is applied to
%      the image.  The operators and how they are utilized are listed here
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o composite_image: the composite (source) image.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra Controls from Image meta-data in 'composite_image' (artifacts)
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated
%        list of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp"
%        and "DisplaceCompositeOp".
%
%    o "compose:outside-overlay"
%        Modify how the composition is to effect areas not directly covered
%        by the 'composite_image' at the offset given.  Normally this is
%        dependent on the 'compose' method, especially Duff-Porter methods.
%
%        If set to "false" then disable all normal handling of pixels not
%        covered by the composite_image.  Typically used for repeated tiling
%        of the composite_image by the calling API.
%
%        Previous to IM v6.5.3-3  this was called "modify-outside-overlay"
%
*/

/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}

/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

/*
** Programmers notes on SVG specification.
**
** A Composition is defined by...
**    Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
**    Blending areas :  X = 1     for area of overlap, ie: f(Sc,Dc)
**                      Y = 1     for source preserved
**                      Z = 1     for destination preserved
**
** Conversion to transparency (then optimized)
**    Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
**    Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
**
** Where...
**    Sca = Sc*Sa     normalized Source color divided by Source alpha
**    Dca = Dc*Da     normalized Dest color divided by Dest alpha
**    Dc' = Dca'/Da'  the desired color value for this channel.
**
** Da' in the following formulas appears as 'gamma': the resulting alpha
** value.
**
** Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
** the following optimizations...
**    gamma = Sa+Da-Sa*Da;
**    gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
**    opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma
**
** The above SVG definitions also dictate that Mathematical Composition
** methods should use a 'Over' blending mode for the Alpha Channel.
** It however was not applied for composition modes of 'Plus', 'Minus',
** the modulus versions of 'Add' and 'Subtract'.
**
** Mathematical operator changes to be applied from IM v6.7...
**
**  1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
**     'ModulusAdd' and 'ModulusSubtract' for clarity.
**
**  2/ All mathematical compositions work as per the SVG specification
**     with regard to blending.  This now includes 'ModulusAdd' and
**     'ModulusSubtract'.
**
**  3/ When the special channel flag 'sync' (synchronize channel updates)
**     is turned off (enabled by default) then mathematical compositions are
**     only performed on the channels specified, and are applied
**     independently of each other.  In other words the mathematics is
**     performed as 'pure' mathematical operations, rather than as image
**     operations.
*/

/* SVG 'Atop' color function: source over destination, clipped to the
   destination's coverage.  Da cancels out of the optimized form. */
static inline MagickRealType Atop(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,
  const MagickRealType magick_unused(Da))
{
  return(p*Sa+q*(1.0-Sa));  /* Da optimized out,  Da/gamma => 1.0 */
}

/* Apply the 'Atop' color function per channel; the result keeps the
   destination's alpha. */
static inline void CompositeAtop(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  composite->opacity=q->opacity;   /* optimized  Da = 1.0-Gamma */
  composite->red=Atop(p->red,Sa,q->red,1.0);
  composite->green=Atop(p->green,Sa,q->green,1.0);
  composite->blue=Atop(p->blue,Sa,q->blue,1.0);
  if (q->colorspace == CMYKColorspace)
    composite->index=Atop(p->index,Sa,q->index,1.0);
}

/*
  What is this Composition method for? Can't find any specification!
  WARNING this is not doing correct 'over' blend handling (Anthony Thyssen).
*/
static inline void CompositeBumpmap(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    intensity;

  /* Scale every destination channel by the source pixel's intensity. */
  intensity=MagickPixelIntensity(p);
  composite->red=QuantumScale*intensity*q->red;
  composite->green=QuantumScale*intensity*q->green;
  composite->blue=QuantumScale*intensity*q->blue;
  composite->opacity=(MagickRealType) QuantumScale*intensity*
    p->opacity;
  if (q->colorspace == CMYKColorspace)
    composite->index=QuantumScale*intensity*q->index;
}

/* Duff-Porter 'Clear': the result is fully transparent black. */
static inline void CompositeClear(const MagickPixelPacket *q,
  MagickPixelPacket *composite)
{
  composite->opacity=(MagickRealType) TransparentOpacity;
  composite->red=0.0;
  composite->green=0.0;
  composite->blue=0.0;
  if (q->colorspace == CMYKColorspace)
    composite->index=0.0;
}

/* SVG 'ColorBurn' color function on premultiplied channel values. */
static MagickRealType ColorBurn(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Oct 2004 SVG specification.
  */
  if (Sca*Da + Dca*Sa <= Sa*Da)
    return(Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sa*(Sca*Da+Dca*Sa-Sa*Da)/Sca + Sca*(1.0-Da) + Dca*(1.0-Sa));
#else
  /*
    March 2009 SVG specification.
  */
  if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon))
    return(Sa*Da+Dca*(1.0-Sa));
  if (Sca < MagickEpsilon)
    return(Dca*(1.0-Sa));
  return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*Sa/Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#endif
}

/* Apply 'ColorBurn' per channel with SVG 'over' alpha blending. */
static inline void CompositeColorBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/* SVG 'ColorDodge' color function on premultiplied channel values. */
static MagickRealType ColorDodge(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da)
{
  /*
    Oct 2004 SVG specification.
  */
  if ((Sca*Da+Dca*Sa) >= Sa*Da)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#if 0
  /*
    New specification, March 2009 SVG specification.  This specification was
    also wrong for non-overlap cases.
  */
  if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return(Sca*(1.0-Da));
  if (fabs(Sca-Sa) < MagickEpsilon)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca)));
#endif
#if 0
  /*
    Working from first principles using the original formula:

       f(Sc,Dc) = Dc/(1-Sc)

    This works correctly!  Looks like the 2004 model was right but just
    required an extra condition for correct handling.
  */
  if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return(Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (fabs(Sca-Sa) < MagickEpsilon)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
#endif
}

/* Apply 'ColorDodge' per channel with SVG 'over' alpha blending. */
static inline void CompositeColorDodge(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/* 'Darken' color function: whichever pixel is darker wins, alpha-blended
   'over' in the appropriate direction. */
static inline MagickRealType Darken(const MagickRealType p,
  const MagickRealType alpha,const MagickRealType q,const MagickRealType beta)
{
  if (p < q)
    return(MagickOver_(p,alpha,q,beta));  /* src-over */
  return(MagickOver_(q,beta,p,alpha));    /* dst-over */
}

static inline void CompositeDarken(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Darken is equivalent to a 'Minimum' method
      OR a greyscale version of a binary 'Or'
      OR the 'Intersection' of pixel sets.
  */
  double
    gamma;

  if ( (channel & SyncChannels) != 0 ) {
    composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */
    gamma=1.0-QuantumScale*composite->opacity;
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity);
    composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity);
    composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=MagickMax(p->opacity,q->opacity);
    if ( (channel & RedChannel) != 0 )
      composite->red=MagickMin(p->red,q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=MagickMin(p->green,q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=MagickMin(p->blue,q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=MagickMin(p->index,q->index);
  }
}

static inline void CompositeDarkenIntensity(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  /*
    Select the pixel based on the intensity level.
    If 'Sync' flag select whole pixel based on alpha weighted intensity.
    Otherwise use intensity only, but restrict copy according to channel.
  */
  if ( (channel & SyncChannels) != 0 ) {
    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;
    Da=1.0-QuantumScale*q->opacity;
    *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q))
                 ? *p : *q;
  }
  else {
    int
      from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q));

    if ( (channel & AlphaChannel) != 0 )
      composite->opacity = from_p ? p->opacity : q->opacity;
    if ( (channel & RedChannel) != 0 )
      composite->red = from_p ? p->red : q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green = from_p ? p->green : q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue = from_p ? p->blue : q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index = from_p ? p->index : q->index;
  }
}

/* 'Difference' color function: absolute difference, alpha-weighted. */
static inline MagickRealType Difference(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  /* Optimized by multiplying by QuantumRange (taken from gamma).  */
  return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q));
}

static inline void CompositeDifference(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    /* Values are not normalized as an optimization.  */
    composite->red=gamma*Difference(p->red,Sa,q->red,Da);
    composite->green=gamma*Difference(p->green,Sa,q->green,Da);
    composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Difference(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-fabs(p->opacity - q->opacity);
    if ( (channel & RedChannel) != 0 )
      composite->red=fabs(p->red - q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=fabs(p->green - q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=fabs(p->blue - q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=fabs(p->index - q->index);
  }
}

static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  /*
    Divide Source by Destination

       f(Sc,Dc) = Sc / Dc

    But with appropriate handling for special case of Dc == 0 specifically
    so that   f(Black,Black)=Black  and  f(non-Black,Black)=White.
    It is however also important to correctly do 'over' alpha blending which
    is why the formula becomes so complex.
  */
  if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
    return(Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (fabs(Dca) < MagickEpsilon)
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeDivide(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
    composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0);
  }
}
static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Exclusion(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Exclusion(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Exclusion(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Exclusion(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) 
return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeHardLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType ConvertHueToRGB(MagickRealType m1, MagickRealType m2,MagickRealType hue) { if (hue < 0.0) hue+=1.0; if (hue > 1.0) hue-=1.0; if ((6.0*hue) < 1.0) return(m1+6.0*(m2-m1)*hue); if ((2.0*hue) < 1.0) return(m2); if ((3.0*hue) < 2.0) return(m1+6.0*(m2-m1)*(2.0/3.0-hue)); return(m1); } static void HCLComposite(const double hue,const double chroma,const double luma, MagickRealType *red,MagickRealType *green,MagickRealType *blue) { double b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,double *hue,double *chroma,double *luma) { double b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (double *) NULL); assert(chroma != (double *) NULL); assert(luma != (double *) NULL); r=(double) red; g=(double) green; b=(double) blue; max=MagickMax(r,MagickMax(g,b)); c=max-(double) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == (MagickRealType) max) h=fmod((g-b)/c+6.0,6.0); else if (green == (MagickRealType) max) h=((b-r)/c)+2.0; else if (blue == (MagickRealType) max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa, const MagickRealType magick_unused(q),const MagickRealType Da) { return(Sa*p*Da); } static inline void CompositeIn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*Da; composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*In(p->red,Sa,q->red,Da); composite->green=gamma*In(p->green,Sa,q->green,Da); 
composite->blue=gamma*In(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*In(p->index,Sa,q->index,Da); } static inline MagickRealType Lighten(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p > q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeLighten(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Lighten is also equvalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMin(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMax(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMax(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMax(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMax(p->index,q->index); } } static inline void CompositeLightenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. 
Otherwise use Intenisty only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } #if 0 static inline MagickRealType LinearDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearDodge: simplifies to a trivial formula f(Sc,Dc) = Sc + Dc Dca' = Sca + Dca */ return(Sca+Dca); } #endif static inline void CompositeLinearDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*(p->red*Sa+q->red*Da); composite->green=gamma*(p->green*Sa+q->green*Da); composite->blue=gamma*(p->blue*Sa+q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*(p->index*Sa+q->index*Da); } static inline MagickRealType LinearBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: 
f(Sc,Dc) = Sc + Dc - 1 */ return(Sca+Dca-Sa*Da); } static inline void CompositeLinearBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType LinearLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { #if 0 /* Previous formula, was only valid for fully-opaque images. */ return(Dca+2*Sca-1.0); #else /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ return((Sca-Sa)*Da+Sca+Dca); #endif } static inline void CompositeLinearLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
    MagickEpsilon : gamma);
  composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Mathematics(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
  const GeometryInfo *geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is
    defined as...
       f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
    Where the arguments A,B,C,D are (currently) passed to composite as a
    comma separated 'geometry' string in the "compose:args" image artifact.
       A = a->rho,   B = a->sigma,  C = a->xi,  D = a->psi
    Applying the SVG transparency formula (see above), we get...
     Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
     Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
       Dca*(1.0-Sa)
  */
  return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
    geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
}

/*
  User-parameterized 'Mathematics' composition; coefficients A,B,C,D come
  in via 'args' (parsed from the "compose:args" artifact by the caller).
*/
static inline void CompositeMathematics(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  const GeometryInfo *args, MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
      MagickEpsilon : gamma);
    composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da,args);
    composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da,args);
    composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da,args);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,
        QuantumScale*q->index*Da,Da,args);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
  }
}

static inline void CompositePlus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    /*
      NOTE: "Plus" does not use 'over' alpha-blending but uses a
      special 'plus' form of alpha-blending. It is the ONLY mathematical
      operator to do this. This is what makes it different to the
      otherwise equivalent "LinearDodge" composition method.

      Note however that color channels are still affected by the alpha
      channel as a result of the blending, making it just as useless for
      independent channel maths, just like all other mathematical
      composition methods.

      As such the removal of the 'sync' flag, is still a useful
      convention.
      The MagickPixelCompositePlus() function is defined in
      "composite-private.h" so it can also be used for Image Blending.
    */
    MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=p->opacity+q->opacity-QuantumRange;
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red+q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green+q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue+q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index+q->index;
  }
}

static inline MagickRealType Minus(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,
  const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination
      f(Sc,Dc) = Sc - Dc
  */
  return(Sca + Dca - 2*Dca*Sa);
}

/* 'Minus' composition; synced channels are alpha-blended 'over' per SVG. */
static inline void CompositeMinus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
    composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
    composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-(Sa-Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red-q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green-q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue-q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index-q->index;
  }
}

/*
  Add the two channel values, wrapping (modulus) any overflow back into
  the quantum range; the result is then alpha-weighted per the SVG
  transparency formula.
*/
static inline MagickRealType ModulusAdd(const MagickRealType p,
  const MagickRealType Sa, const MagickRealType q, const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p+q;
  if (pixel > QuantumRange)
    pixel-=(QuantumRange+1.0);  /* wrap-around rather than clip */
  return(pixel*Sa*Da + p*Sa*(1-Da) + q*Da*(1-Sa));
}

static inline void CompositeModulusAdd(const MagickPixelPacket *p,
  const MagickPixelPacket *q, const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Sa,
      Da;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusAdd(p->red,Sa,q->red,Da);
    composite->green=ModulusAdd(p->green,Sa,q->green,Da);
    composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
  }
}

/*
  Subtract the two channel values, wrapping (modulus) any underflow back
  into the quantum range; alpha-weighted like ModulusAdd above.
*/
static inline MagickRealType ModulusSubtract(const MagickRealType p,
  const MagickRealType Sa, const MagickRealType q, const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p-q;
  if (pixel < 0.0)
    pixel+=(QuantumRange+1.0);  /* wrap-around rather than clip */
  return(pixel*Sa*Da + p*Sa*(1-Da) + q*Da*(1-Sa));
}

static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q, const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma = RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
    composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
    composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
  }
}

/*
  Multiply: f(Sc,Dc) = Sc*Dc, expressed in premultiplied form per the
  SVG compositing formula Dca' = Sca*Dca + Sca*(1-Da) + Dca*(1-Sa).
*/
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    /* guard against division by a vanishing blend coverage */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
    composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Sa*Da);
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumScale*p->red*q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumScale*p->green*q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumScale*p->blue*q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumScale*p->index*q->index;
  }
}

/* Porter-Duff 'Out': source kept only where the destination is absent. */
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  return(Sa*p*(1.0-Da));
}

static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);  /* 'out' coverage: source minus destination */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}

static MagickRealType PegtopLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light
    alternative: A continuous version of the Softlight function, producing
    very similar results.

      f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);  /* fully transparent destination: pass source through */
  return(Dca*Dca*(Sa-2*Sca)/Da+Sca*(2*Dca+1-Da)+Dca*(1-Sa));
}

static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

      f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ?
        2*Sc : Dc
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen: A negated multiply
      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    /* fold QuantumScale into the alpha factors so the per-channel calls
       below need no extra scaling */
    Sa*=(MagickRealType) QuantumScale;
    Da*=(MagickRealType) QuantumScale; /* optimization */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
      MagickEpsilon : gamma);
    composite->red=gamma*Screen(p->red*Sa,q->red*Da);
    composite->green=gamma*Screen(p->green*Sa,q->green*Da);
    composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Screen(p->index*Sa,q->index*Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*Screen(QuantumScale*p->red,
        QuantumScale*q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*Screen(QuantumScale*p->green,
        QuantumScale*q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
        QuantumScale*q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*Screen(QuantumScale*p->index,
        QuantumScale*q->index);
  }
}

static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
#if 0
  /*
    Oct 2004 SVG specification -- was found to be incorrect
    See http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html.
  */
  if (2.0*Sca < Sa)
    return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (8.0*Dca <= Da)
    return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa)*(3.0-8.0*Dca/Da))+
      Sca*(1.0-Da)+Dca*(1.0-Sa));
  return((Dca*Sa+(pow(Dca/Da,0.5)*Da-Dca)*(2.0*Sca-Sa))+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
#else
  MagickRealType
    alpha,
    beta;

  /*
    New specification: March 2009 SVG specification.
  */
  alpha=Dca/Da;
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
  return(beta);
#endif
}

static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated
  Multiply difference by amount, if difference larger than threshold???
  What use this is, is completely unknown.
  The Opacity calculation appears to be inverted  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  /* below threshold: keep destination unchanged */
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  /* degenerate alphas would divide by zero below; fall back to a blend */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/* Porter-Duff 'Xor': each pixel kept only where the other is absent. */
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1-Da)+Dca*(1-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

/*
  Convenience wrapper: composite over all default channels.
  Returns the status of CompositeImageChannel().
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *composite_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,composite_image,
    x_offset,y_offset);
  return(status);
}

MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *composite_view,
    *image_view;

  const char
    *value;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *composite_image,
    *destination_image;

  MagickBooleanType
    clip_to_self,
status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType amount, destination_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; /* Prepare composite image. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); composite_image=CloneImage(composite,0,0,MagickTrue,exception); if (composite_image == (const Image *) NULL) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); (void) SetImageColorspace(composite_image,image->colorspace); GetMagickPixelPacket(image,&zero); destination_image=(Image *) NULL; amount=0.5; destination_dissolve=1.0; clip_to_self=MagickTrue; percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case ClearCompositeOp: case SrcCompositeOp: case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: { /* Modify destination outside the overlaid region. 
*/ clip_to_self=MagickFalse; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (composite_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) composite_image->columns) >= (ssize_t) image->columns) break; if ((y_offset+(ssize_t) composite_image->rows) >= (ssize_t) image->rows) break; status=MagickTrue; composite_view=AcquireVirtualCacheView(composite_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(composite_image,image,composite_image->rows,1) #endif for (y=0; y < (ssize_t) composite_image->rows; y++) { MagickBooleanType sync; register const IndexPacket *composite_indexes; register const PixelPacket *p; register IndexPacket *indexes; register PixelPacket *q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns, 1,exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, composite_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } composite_indexes=GetCacheViewVirtualIndexQueue(composite_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); (void) CopyMagickMemory(q,p,composite_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (composite_indexes != (const IndexPacket *) NULL)) (void) CopyMagickMemory(indexes,composite_indexes, composite_image->columns*sizeof(*indexes)); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == 
MagickFalse) status=MagickFalse; } } composite_view=DestroyCacheView(composite_view); image_view=DestroyCacheView(image_view); composite_image=DestroyImage(composite_image); return(status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* Modify destination outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); clip_to_self=MagickFalse; break; } case BlurCompositeOp: { CacheView *composite_view, *destination_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ destination_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (destination_image == (Image *) NULL) { composite_image=DestroyImage(composite_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidGeometry","'%s' '%s'", "compose:args",value); composite_image=DestroyImage(composite_image); destination_image=DestroyImage(destination_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. 
*/ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* default the unrotated ellipse width and height axis vectors */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter,1.0); /* do the variable blurring of each pixel in image */ pixel=zero; composite_view=AcquireVirtualCacheView(composite_image,exception); destination_view=AcquireAuthenticCacheView(destination_image,exception); for (y=0; y < (ssize_t) composite_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *restrict p; register PixelPacket *restrict r; register IndexPacket *restrict destination_indexes; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(destination_view,0,y, destination_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view); for (x=0; x < (ssize_t) composite_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n", blur.x1, blur.x2, blur.y1, blur.y2); fprintf(stderr, "scaled by=%lf,%lf\n", QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p)); } #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(p), blur.y1*QuantumScale*GetPixelGreen(p), blur.x2*QuantumScale*GetPixelRed(p), blur.y2*QuantumScale*GetPixelGreen(p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel); SetPixelPacket(destination_image,&pixel,r,destination_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == 
MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); composite_view=DestroyCacheView(composite_view); destination_view=DestroyCacheView(destination_view); composite_image=DestroyImage(composite_image); composite_image=destination_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *composite_view, *destination_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket *restrict destination_indexes; register PixelPacket *restrict r; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ destination_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (destination_image == (Image *) NULL) { composite_image=DestroyImage(composite_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue|HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (composite_image->columns-1.0)/ 2.0; vertical_scale=(MagickRealType) (composite_image->rows-1.0)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1.0)/2.0; vertical_scale=(MagickRealType) (image->rows-1.0)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(composite_image->columns-1.0)/200.0; vertical_scale*=(composite_image->rows-1.0)/200.0; } else { horizontal_scale*=(image->columns-1.0)/200.0; vertical_scale*=(image->rows-1.0)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that 
point according to +X+Y user inputs. default = center of overlay image arg flag '!' = locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) == 0) center.x=(MagickRealType) (x_offset+(composite_image->columns-1)/ 2.0); else center.x=((MagickRealType) image->columns-1)/2.0; else if ((flags & AspectValue) == 0) center.x=(MagickRealType) (x_offset+geometry_info.xi); else center.x=geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) == 0) center.y=(MagickRealType) (y_offset+(composite_image->rows-1)/2.0); else center.y=((MagickRealType) image->rows-1)/2.0; else if ((flags & AspectValue) == 0) center.y=(MagickRealType) (y_offset+geometry_info.psi); else center.y=geometry_info.psi; } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ pixel=zero; image_view=AcquireVirtualCacheView(image,exception); composite_view=AcquireVirtualCacheView(composite_image,exception); destination_view=AcquireAuthenticCacheView(destination_image,exception); for (y=0; y < (ssize_t) composite_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *restrict p; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(destination_view,0,y, destination_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view); for (x=0; x < (ssize_t) composite_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } /* Displace the offset. 
*/ offset.x=(double) ((horizontal_scale*(GetPixelRed(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? x : 0)); offset.y=(double) ((vertical_scale*(GetPixelGreen(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0)); (void) InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale* pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p))); SetPixelPacket(destination_image,&pixel,r,destination_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) break; } destination_view=DestroyCacheView(destination_view); composite_view=DestroyCacheView(composite_view); image_view=DestroyCacheView(image_view); composite_image=DestroyImage(composite_image); composite_image=destination_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. 
*/ value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; destination_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { destination_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) destination_dissolve=geometry_info.sigma/100.0; if ((destination_dissolve-MagickEpsilon) < 0.0) destination_dissolve=0.0; clip_to_self=MagickFalse; if ((destination_dissolve+MagickEpsilon) > 1.0 ) { destination_dissolve=1.0; clip_to_self=MagickTrue; } } break; } case BlendCompositeOp: { value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; destination_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) destination_dissolve=geometry_info.sigma/100.0; clip_to_self=MagickFalse; if ((destination_dissolve+MagickEpsilon) > 1.0) clip_to_self=MagickTrue; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. 
This Composition method is depreciated */ value=GetImageArtifact(composite_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } value=GetImageArtifact(composite_image,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse; /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; GetMagickPixelPacket(composite_image,&zero); composite_view=AcquireVirtualCacheView(composite_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(composite_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, destination, source; register const IndexPacket *restrict composite_indexes; register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) composite_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(PixelPacket *) NULL; p=(PixelPacket *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) composite_image->rows)) { p=GetCacheViewVirtualPixels(composite_view,0,y-y_offset, composite_image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset; } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); composite_indexes=GetCacheViewVirtualIndexQueue(composite_view); GetMagickPixelPacket(composite_image,&source); GetMagickPixelPacket(image,&destination); hue=0.0; chroma=0.0; luma=0.0; for (x=0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x-x_offset) >= (ssize_t) composite_image->columns) break; } destination.red=(MagickRealType) GetPixelRed(q); destination.green=(MagickRealType) GetPixelGreen(q); destination.blue=(MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) destination.opacity=(MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) destination.index=(MagickRealType) GetPixelIndex(indexes+x); if (image->colorspace == CMYKColorspace) { destination.red=(MagickRealType) QuantumRange-destination.red; destination.green=(MagickRealType) QuantumRange-destination.green; destination.blue=(MagickRealType) QuantumRange-destination.blue; destination.index=(MagickRealType) QuantumRange-destination.index; } /* Handle destination modifications outside overlaid region. 
*/ composite=destination; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) composite_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity=(MagickRealType) (QuantumRange- destination_dissolve*(QuantumRange-composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&destination,&composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity=(MagickRealType) TransparentOpacity; break; } default: { (void) GetOneVirtualMagickPixel(composite_image,x-x_offset, y-y_offset,&composite,exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,ClampToQuantum(composite.red)); SetPixelGreen(q,ClampToQuantum(composite.green)); SetPixelBlue(q,ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q,ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,ClampToQuantum(composite.index)); q++; continue; } /* Handle normal overlay of source onto destination. 
*/ source.red=(MagickRealType) GetPixelRed(p); source.green=(MagickRealType) GetPixelGreen(p); source.blue=(MagickRealType) GetPixelBlue(p); if (composite_image->matte != MagickFalse) source.opacity=(MagickRealType) GetPixelOpacity(p); if (composite_image->colorspace == CMYKColorspace) source.index=(MagickRealType) GetPixelIndex(composite_indexes+ x-x_offset); if (composite_image->colorspace == CMYKColorspace) { source.red=(MagickRealType) QuantumRange-source.red; source.green=(MagickRealType) QuantumRange-source.green; source.blue=(MagickRealType) QuantumRange-source.blue; source.index=(MagickRealType) QuantumRange-source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&destination,&composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite=source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source,source.opacity,&destination, destination.opacity,&composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&destination,destination.opacity,&source, source.opacity,&composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source,&destination,&composite); break; } case DstInCompositeOp: { CompositeIn(&destination,&source,&composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source,&destination,&composite); break; } case DstOutCompositeOp: { CompositeOut(&destination,&source,&composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source,&destination,&composite); break; } case DstAtopCompositeOp: { CompositeAtop(&destination,&source,&composite); break; } case XorCompositeOp: { CompositeXor(&source,&destination,&composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source,&destination,channel,&composite); break; } case MinusDstCompositeOp: { 
CompositeMinus(&source,&destination,channel,&composite); break; } case MinusSrcCompositeOp: { CompositeMinus(&destination,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&destination,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&destination,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&destination,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&destination,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&destination,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&destination,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&destination,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&destination,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&destination,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&destination,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&destination,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&destination,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&destination,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&destination,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&destination,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&destination,&composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&destination,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&destination,&composite); break; } case 
OverlayCompositeOp: { /* Overlay = Reversed HardLight. */ CompositeHardLight(&destination,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&destination,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&destination,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&destination,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&destination,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&destination,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&destination) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&destination,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&destination, (MagickRealType) (QuantumRange-destination_dissolve*(QuantumRange- destination.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&destination, destination_dissolve,&composite); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&destination,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHCL(destination.red,destination.green,destination.blue,&hue, &chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); break; } case HueCompositeOp: { if (source.opacity == 
TransparentOpacity) break; if (destination.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(destination.red,destination.green,destination.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < destination.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (destination.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(destination.red,destination.green,destination.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < destination.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (destination.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(destination.red,destination.green,destination.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&sans, &luma); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < destination.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (destination.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(destination.red,destination.green,destination.blue,&sans, &sans,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < destination.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { 
composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue=source.blue; break; } case CopyOpacityCompositeOp: { if (source.matte == MagickFalse) { composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); break; } composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,ClampToQuantum(composite.red)); SetPixelGreen(q,ClampToQuantum(composite.green)); SetPixelBlue(q,ClampToQuantum(composite.blue)); SetPixelOpacity(q,ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,ClampToQuantum(composite.index)); p++; if (p >= (pixels+composite_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } composite_view=DestroyCacheView(composite_view); image_view=DestroyCacheView(image_view); if (destination_image != (Image * ) NULL) destination_image=DestroyImage(destination_image); else composite_image=DestroyImage(composite_image); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T e x t u r e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TextureImage() repeatedly tiles the texture image across and down the image
%  canvas.
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o texture: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /*
    Work on a private clone of the texture so the caller's image is never
    modified; TileVirtualPixelMethod makes out-of-bounds reads wrap, which
    gives the horizontal/vertical tiling for free in the fast path below.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
  status=MagickTrue;
  /*
    Two strategies: when the compose method actually blends (anything other
    than a plain copy, or an Over that involves transparency), fall back to
    the general path that calls CompositeImage() once per tile position.
    Otherwise take the optimized path further down that memcpy()s raw pixel
    rows.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        Tile texture onto the image background (general compose path).
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns;
             x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* Each tile origin is shifted by the texture's tile offset. */
          thread_status=CompositeImage(image,image->compose,texture_image,x+
            texture_image->tile_offset.x,y+texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Final 100% progress tick before returning from the general path. */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): the composite is a
    straight pixel copy, so each destination row is assembled with
    CopyMagickMemory() instead of per-pixel compositing.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,texture_image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Read one full texture row; the modulo wraps the row index vertically so
      the texture repeats down the canvas (the tile offset shifts the phase).
    */
    p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
      texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns;
         x+=(ssize_t) texture_image->columns)
    {
      width=texture_image->columns;
      /* Clamp the last copy so it does not run past the right edge. */
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      (void) CopyMagickMemory(q,p,width*sizeof(*p));
      /* CMYK keeps the black channel in the index queue; copy it too when
         both images are CMYK. */
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          (void) CopyMagickMemory(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TextureImage)
#endif
        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
convolution_sgemm_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack4_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const float* bias = _bias; // permute Mat tmp; #if __aarch64__ if (size >= 12) tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 16u, 4, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 16u, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 16u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 16u, 4, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 4, opt.workspace_allocator); #else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 16u, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 16u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * 
maxk, inch, size / 2 + size % 2, 16u, 4, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 4, opt.workspace_allocator); #endif { #if __aarch64__ int nn_size = size / 12; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 12; float* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x12 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); img0 += size * 4; } } } remain_size_start += nn_size * 12; nn_size = (size - remain_size_start) >> 3; #else int nn_size = size >> 3; int remain_size_start = 0; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); #else float* tmpptr = tmp.channel(i / 8); #endif for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x8 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] 
\n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "sub %0, %0, #64 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif // __aarch64__ img0 += size * 4; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #endif for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x4 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vstm %1!, {d0-d7} \n" : "=r"(img0), // %0 
"=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ img0 += size * 4; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { // transpose 4x2 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st2 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst2.f32 {d0-d3}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1"); #endif // __aarch64__ img0 += size * 4; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); #endif // __aarch64__ img0 += size * 4; } } } } int remain_outch_start = 0; #if __aarch64__ int nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p + 1); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p * 4 : zeros; int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov v23.16b, v1.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, 
v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, 
v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b 
\n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v0.s[1] \n" "fmla v26.4s, v9.4s, v0.s[2] \n" "fmla v27.4s, v9.4s, v0.s[3] \n" "fmla v28.4s, v9.4s, v1.s[0] \n" "fmla v29.4s, v9.4s, v1.s[1] \n" "fmla v30.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v17.4s, v10.4s, v2.s[1] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v2.s[3] \n" "fmla v20.4s, v10.4s, v3.s[0] \n" "fmla v21.4s, v10.4s, v3.s[1] \n" "fmla v22.4s, v10.4s, v3.s[2] \n" "fmla v23.4s, v10.4s, v3.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v24.4s, v11.4s, v2.s[0] \n" "fmla v25.4s, v11.4s, v2.s[1] \n" "fmla v26.4s, v11.4s, v2.s[2] \n" "fmla v27.4s, v11.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v28.4s, v11.4s, v3.s[0] \n" "fmla v29.4s, v11.4s, v3.s[1] \n" "fmla v30.4s, v11.4s, v3.s[2] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v17.4s, v12.4s, v4.s[1] \n" "fmla v18.4s, v12.4s, v4.s[2] \n" "fmla v19.4s, v12.4s, v4.s[3] \n" "fmla v20.4s, v12.4s, v5.s[0] \n" "fmla 
v21.4s, v12.4s, v5.s[1] \n" "fmla v22.4s, v12.4s, v5.s[2] \n" "fmla v23.4s, v12.4s, v5.s[3] \n" "fmla v24.4s, v13.4s, v4.s[0] \n" "fmla v25.4s, v13.4s, v4.s[1] \n" "fmla v26.4s, v13.4s, v4.s[2] \n" "fmla v27.4s, v13.4s, v4.s[3] \n" "fmla v28.4s, v13.4s, v5.s[0] \n" "fmla v29.4s, v13.4s, v5.s[1] \n" "fmla v30.4s, v13.4s, v5.s[2] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v17.4s, v14.4s, v6.s[1] \n" "fmla v18.4s, v14.4s, v6.s[2] \n" "fmla v19.4s, v14.4s, v6.s[3] \n" "fmla v20.4s, v14.4s, v7.s[0] \n" "fmla v21.4s, v14.4s, v7.s[1] \n" "fmla v22.4s, v14.4s, v7.s[2] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v6.s[0] \n" "fmla v25.4s, v15.4s, v6.s[1] \n" "fmla v26.4s, v15.4s, v6.s[2] \n" "fmla v27.4s, v15.4s, v6.s[3] \n" "fmla v28.4s, v15.4s, v7.s[0] \n" "fmla v29.4s, v15.4s, v7.s[1] \n" "fmla v30.4s, v15.4s, v7.s[2] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov 
v23.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v0.s[1] \n" "fmla v22.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "fmla v16.4s, v10.4s, v1.s[0] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v1.s[2] \n" "fmla v19.4s, v10.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v20.4s, v11.4s, v1.s[0] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v1.s[2] \n" "fmla v23.4s, v11.4s, v1.s[3] \n" "fmla v16.4s, v12.4s, v2.s[0] \n" "fmla v17.4s, v12.4s, v2.s[1] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v2.s[3] \n" "fmla v20.4s, v13.4s, v2.s[0] \n" "fmla v21.4s, v13.4s, v2.s[1] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v2.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v3.s[0] \n" "fmla v17.4s, v14.4s, v3.s[1] \n" "fmla v18.4s, v14.4s, v3.s[2] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v3.s[0] \n" "fmla v21.4s, v15.4s, v3.s[1] \n" "fmla v22.4s, v15.4s, v3.s[2] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < size; i += 2) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 
2); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v1.16b \n" "mov v19.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v0.s[3] \n" "fmla v18.4s, v11.4s, v0.s[2] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "fmla v16.4s, v12.4s, v1.s[0] \n" "fmla v17.4s, v12.4s, v1.s[1] \n" "fmla v18.4s, v13.4s, v1.s[0] \n" "fmla v19.4s, v13.4s, v1.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v1.s[2] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v1.s[2] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < size; i++) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.4s, v17.4s}, [%10] \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla 
v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v18.4s, v10.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v18.4s, v14.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } } #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* outptr0 = top_blob.channel(p); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p * 4 : zeros; int i = 0; #if __aarch64__ for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); const float* kptr0 = kernel.channel(p / 2 + p % 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] 
\n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __aarch64__ for (; i + 7 < size; i += 8) { #if __aarch64__ const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const float* kptr0 = kernel.channel(p / 2 + p % 2); #else const float* tmpptr = tmp.channel(i / 8); const float* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, 
v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "vmov q12, q0 \n" "vmov q13, q0 \n" "vmov q14, q0 \n" "vmov q15, q0 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" 
"vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < size; i += 4) { #if __aarch64__ const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr0 = kernel.channel(p / 2 + p % 2); #else const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const float* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v16.4s, v9.4s, v1.s[0] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v1.s[2] \n" "fmla v19.4s, v9.4s, v1.s[3] \n" "fmla v16.4s, v10.4s, 
v2.s[0] \n" "fmla v17.4s, v10.4s, v2.s[1] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v2.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v3.s[0] \n" "fmla v17.4s, v11.4s, v3.s[1] \n" "fmla v18.4s, v11.4s, v3.s[2] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < size; i += 2) { #if __aarch64__ const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* kptr0 = kernel.channel(p / 2 + p % 2); #else const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); const float* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov 
v17.16b, v0.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v9.4s, v0.s[2] \n" "fmla v19.4s, v9.4s, v0.s[3] \n" "fmla v16.4s, v10.4s, v1.s[0] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v18.4s, v11.4s, v1.s[2] \n" "fmla v19.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "veor q10, q10 \n" "veor q11, q11 \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q5, d1[0] \n" "vmla.f32 q11, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q9, q6, d2[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q10, q7, d3[0] \n" "vmla.f32 q11, q7, d3[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vst1.f32 {d16-d19}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i < size; i++) { #if __aarch64__ const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* kptr0 = kernel.channel(p / 2 + p % 2); #else const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const float* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v16.4s}, [%8] \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v18.4s, v10.4s, v0.s[2] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v16.4s, v16.4s, v17.4s \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "vld1.f32 {d16-d17}, [%8] \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q7, d1[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vadd.f32 q8, q8, q9 \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } } } static void convolution_im2col_sgemm_transform_kernel_pack4_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 4b-4a-maxk-inch/4a-outch/4b Mat kernel = _kernel.reshape(maxk, inch, outch); #if __aarch64__ kernel_tm.create(32 * maxk, inch / 4, outch / 8 + (outch % 8) / 4); #else kernel_tm.create(16 * maxk, inch / 4, outch / 4); #endif int q = 0; #if __aarch64__ for (; q + 7 < outch; q += 8) { const Mat k0 = kernel.channel(q); const Mat k1 = kernel.channel(q + 1); const Mat k2 = kernel.channel(q + 2); const Mat k3 = kernel.channel(q + 3); const Mat k4 = kernel.channel(q + 4); const Mat k5 = kernel.channel(q + 5); const Mat k6 = kernel.channel(q + 6); const Mat k7 = kernel.channel(q + 7); float* g00 = kernel_tm.channel(q / 8); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const 
float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); for (int k = 0; k < maxk; k++) { g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; q + 3 < outch; q += 4) { const Mat k0 = kernel.channel(q); const Mat k1 = kernel.channel(q + 1); const Mat k2 = kernel.channel(q + 2); const Mat k3 = kernel.channel(q + 3); #if __aarch64__ float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4); #else float* g00 = kernel_tm.channel(q / 4); #endif for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); for (int k = 0; k < maxk; k++) { g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; 
g00[15] = k33[k]; g00 += 16; } } } } static void convolution_im2col_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * 4; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); float* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v * 4; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float32x4_t _val0 = vld1q_f32(sptr); float32x4_t _val1 = vld1q_f32(sptr + stride_w * 4); float32x4_t _val2 = vld1q_f32(sptr + stride_w * 8); float32x4_t _val3 = vld1q_f32(sptr + stride_w * 12); vst1q_f32(ptr, _val0); vst1q_f32(ptr + 4, _val1); vst1q_f32(ptr + 8, _val2); vst1q_f32(ptr + 12, _val3); sptr += stride_w * 16; ptr += 16; } for (; j + 1 < outw; j += 2) { float32x4_t _val0 = vld1q_f32(sptr); float32x4_t _val1 = vld1q_f32(sptr + stride_w * 4); vst1q_f32(ptr, _val0); vst1q_f32(ptr + 4, _val1); sptr += stride_w * 8; ptr += 8; } for (; j < outw; j++) { float32x4_t _val = vld1q_f32(sptr); vst1q_f32(ptr, _val); sptr += stride_w * 4; ptr += 4; } sptr += gap; } } } } } im2col_sgemm_pack4_neon(bottom_im2col, top_blob, kernel, _bias, opt); }
rundata.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "gomptestdata.h" #define MAXN 5100 /* Max value of N */ //static int N; /* Matrix size */ static int procs; /* Number of processors to use */ /* Matrices and vectors */ //static volatile float A[MAXN][MAXN], B[MAXN], X[MAXN]; //static volatile float ORA[MAXN][MAXN], ORB[MAXN], ORX[MAXN]; //static float **ORB, **ORA; static float *B, **A, *X, *ORX; /* A * X = B, solve for X */ static int seed; /* Prototype */ static void gauss(); /* The function you will provide. * It is this routine that is timed. * It is called only on the parent. */ /* Initialize A and B (and X to 0.0s) */ static void initialize_inputs() { int row, col; //ORA = malloc(N * sizeof(float *)); A = malloc (N * sizeof(float *)); for(int i = 0; i < N ; i++ ){ //ORA[i] = malloc(N * sizeof(float)); A[i] = malloc(N * sizeof(float)); } //ORB = malloc(N * sizeof(float)); B = malloc(N * sizeof(float)); ORX = malloc(N * sizeof(float)); X = malloc(N * sizeof(float)); /* printf("\nInitializing...\n"); */ /* // #pragma omp parallel num_threads(8) */ /* { */ /* // #pragma omp for private(row,col) schedule(static,1) nowait */ /* for (col = 0; col < N; col++) { */ /* for (row = 0; row < N; row++) { */ /* ORA[col][row] = (float) rand()/32768.0; */ /* } */ /* ORB[col] = (float)rand()/32768.0; */ /* } */ /* } */ } static void reset_inputs(){ int row, col; printf("\n reseting...\n"); for (col = 0; col < N; col++) { for (row = 0; row < N; row++) { A[row][col] = ORA[row][col]; } B[col] = ORB[col]; X[col] = 0.0; } } static void print_state() { int row, col; printf("A=%p, B=%p, X=%p\n", A, B, X); if (N < 1000) { printf("\nA =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%f%s ", A[row][col], (col < N-1) ? ", " : ";\n\t"); } } printf("\nB = ["); for (col = 0; col < N; col++) { printf("%5.2f%s", B[col], (col < N-1) ? "; " : "]\n"); } printf("\nX = ["); for (col = 0; col < N; col++) { printf("%f%s ", X[col], (col < N-1) ? 
"; " : "]\n"); } } } static void serialgauss(){ int norm, row, col; /* Normalization row, and zeroing * element row and col */ float multiplier; printf("Computing serially.\n"); /* Gaussian elimination */ for (norm = 0; norm < N - 1; norm++) { // int num = N - norm; { for (row = norm + 1; row < N; row++) { multiplier = A[row][norm] / A[norm][norm]; for (col = norm; col < N; col++) { A[row][col] -= A[norm][col] * multiplier; } B[row] -= B[norm] * multiplier; } printf("Print iteration norm %d .\n", norm); print_state(); } } /* (Diagonal elements are not normalized to 1. This is treated in back * substitution.) */ /* Back substitution */ for (row = N - 1; row >= 0; row--) { X[row] = B[row]; for (col = N-1; col > row; col--) { X[row] -= A[row][col] * X[col]; } X[row] /= A[row][row]; //printf("%5.2f ", X[row]); } } static void ompgauss() { int norm, row, col; /* Normalization row, and zeroing * element row and col */ float multiplier; //doneflag[0] = 1; printf("Computing using omp.\n"); /* Gaussian elimination */ #pragma omp parallel private(row, col, multiplier, norm) num_threads(procs) { for (norm = 0; norm < N - 1; norm++) { #pragma omp for schedule(static,1) for (row = norm + 1; row < N; row++) { multiplier = A[row][norm]/A[norm][norm]; for (col = norm; col < N; col++) { A[row][col] -= A[norm][col] * multiplier; } B[row] -= B[norm] * multiplier; } } } printf("I am done\n"); /* (Diagonal elements are not normalized to 1. This is treated in back * substitution.) 
*/ /* Back substitution */ for (row = N - 1; row >= 0; row--) { X[row] = B[row]; for (col = N-1; col > row; col--) { X[row] -= A[row][col] * X[col]; } X[row] /= A[row][row]; } } #define MYTIME() (double)(clock())/(double)(CLOCKS_PER_SEC) int main (int argc, char *argv[]) { int seed, size, np; if(argc<3){ printf("Please input seed, nprocs.\n"); return -1; } seed = atoi(argv[1]); // size = atoi(argv[2]); np =atoi(argv[2]); srand(seed); procs = np; printf("seed %d, size, %d, nprocs: %d\n", seed, N, procs); initialize_inputs(); reset_inputs(); printf("state before openmp run\n"); double start = MYTIME(); //ompgauss(); double end = MYTIME(); printf("state after openmp run\n"); //print_state(); double omp = end-start; printf("openmp done %lf\n", omp); float OMP[N]; for(int row =0; row<N; row++){ OMP[row] = X[row]; } reset_inputs(); printf("state before serial run\n"); start = MYTIME(); serialgauss(); end = MYTIME(); printf("state after serial run\n"); //print_state(); double serial = end-start; printf("serial done %lf\n", serial); float difference = 0.0; for(int row =0; row<N; row++){ difference += (OMP[row]- X[row]); } printf("OMP difference %f speed up %f !\n", difference, serial/omp); return 0; }
DenseMatrix.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseMatrix.h // \brief Header file for the OpenMP-based dense matrix SMP implementation // // Copyright (C) 2012-2020 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/AlignmentFlag.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseMatrix.h> #include <blaze/math/expressions/SparseMatrix.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/smp/ThreadMapping.h> #include <blaze/math/StorageOrder.h> #include <blaze/math/typetraits/IsDenseMatrix.h> #include <blaze/math/typetraits/IsSIMDCombinable.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Submatrix.h> #include <blaze/system/SMP.h> #include <blaze/util/algorithms/Min.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/FunctionTrace.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side dense matrix to be assigned. // \param op The (compound) assignment operation. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a dense // matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side dense matrix , bool SO2 // Storage order of the right-hand side dense matrix , typename OP > // Type of the assignment operation void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); using ET1 = ElementType_t<MT1>; using ET2 = ElementType_t<MT2>; constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> ); constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<MT1> >::size ); const bool lhsAligned( (~lhs).isAligned() ); const bool rhsAligned( (~rhs).isAligned() ); const int threads( omp_get_num_threads() ); const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) ); const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL ); const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 ); const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) ); const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) ); const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 
1UL : 0UL ); const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 ); const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) ); const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0; i<threads; ++i ) { const size_t row ( ( i / threadmap.second ) * rowsPerThread ); const size_t column( ( i % threadmap.second ) * colsPerThread ); if( row >= (~rhs).rows() || column >= (~rhs).columns() ) continue; const size_t m( min( rowsPerThread, (~rhs).rows() - row ) ); const size_t n( min( colsPerThread, (~rhs).columns() - column ) ); if( simdEnabled && lhsAligned && rhsAligned ) { auto target( submatrix<aligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) ); op( target, source ); } else if( simdEnabled && lhsAligned ) { auto target( submatrix<aligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) ); op( target, source ); } else if( simdEnabled && rhsAligned ) { auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) ); op( target, source ); } else { auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) ); op( target, source ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse matrix to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side sparse matrix to be assigned. // \param op The (compound) assignment operation. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a sparse // matrix to a dense matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side sparse matrix , bool SO2 // Storage order of the right-hand side sparse matrix , typename OP > // Type of the assignment operation void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); const size_t threads( omp_get_num_threads() ); const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) ); const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL ); const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 ); const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL ); const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 ); #pragma omp for schedule(dynamic,1) nowait for( size_t i=0; i<threads; ++i ) { const size_t row ( ( i / threadmap.second ) * rowsPerThread ); const size_t column( ( i % threadmap.second ) * colsPerThread ); if( row >= (~rhs).rows() || column >= (~rhs).columns() ) continue; const size_t m( min( rowsPerThread, (~lhs).rows() - row ) ); const size_t n( min( colsPerThread, (~lhs).columns() - column ) ); auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) ); const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) ); op( target, source ); } } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); assign( ~lhs, ~rhs ); } /*! 
\endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be assigned. // \return void // // This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ assign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense matrix. 
// Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense matrix. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ addAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtracction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix. // Due to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtracction assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. 
// \param rhs The right-hand side matrix to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a matrix to a // dense matrix. Due to the explicit application of the SFINAE principle, this function can only // be selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ subAssign( a, b ); } ); } } } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // SCHUR PRODUCT ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix for the Schur product. // \return void // // This function implements the default OpenMP-based SMP Schur product assignment to a dense // matrix. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); schurAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix. // \ingroup math // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix for the Schur product. // \return void // // This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. Due // to the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side dense matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { schurAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ schurAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix. // \ingroup smp // // \param lhs The target left-hand side dense matrix. // \param rhs The right-hand side matrix to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // matrix.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. 
Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename MT1 // Type of the left-hand side dense matrix , bool SO1 // Storage order of the left-hand side matrix , typename MT2 // Type of the right-hand side matrix , bool SO2 > // Storage order of the right-hand side matrix inline auto smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs ) -> EnableIf_t< IsDenseMatrix_v<MT1> > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" ); BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! \endcond */ //************************************************************************************************* } // namespace blaze #endif
nested_loop.c
/*
 * This example illustrates nested loop parallelization with OpenMP.
 *
 * Usage: ./nested_loop <num_threads>
 *
 * @author Apan Qasem
 */
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>

/* Trip count of each of the two nested loops (M*M total iterations). */
#define M 4

int main(int argc, char *argv[]) {

  /* Bug fix: the original dereferenced argv[1] unconditionally, which is
     undefined behavior (typically a segfault) when the program is launched
     without a command-line argument. Validate argc first. */
  if (argc < 2) {
    fprintf(stderr, "usage: %s <num_threads>\n", argv[0]);
    return 1;
  }

  int N = atoi(argv[1]);
  /* atoi() reports parse failure by returning 0; also reject negative or
     zero thread counts, which are invalid for omp_set_num_threads(). */
  if (N <= 0) {
    fprintf(stderr, "error: <num_threads> must be a positive integer\n");
    return 1;
  }

  omp_set_num_threads(N);

  int j, k;
  /* collapse(2) fuses both loops into a single M*M iteration space so the
     work can be shared among all N threads; j is declared outside the
     parallel region, so it must be explicitly privatized. */
#pragma omp parallel for private(j) collapse(2)
  for (k = 0; k < M; k++)
    for (j = 0; j < M; j++)
      printf("I am thread %d in iteration (%d,%d)\n",
             omp_get_thread_num(), k, j);

  return 0;
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. */ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *); static size_t DefineImageColormap(Image *,CubeInfo *,NodeInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. 
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  /*
    Allocate the structure and initialize it with the library defaults.
  */
  quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
  if (quantize_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(quantize_info);
  if (image_info != (ImageInfo *) NULL)
    {
      const char
        *option;

      /*
        Derive dithering and error-measurement settings from the image info;
        an explicit "dither" image option overrides the boolean dither flag.
      */
      quantize_info->dither_method=image_info->dither == MagickFalse ?
        NoDitherMethod : RiemersmaDitherMethod;
      option=GetImageOption(image_info,"dither");
      if (option != (const char *) NULL)
        quantize_info->dither_method=(DitherMethod) ParseCommandOption(
          MagickDitherOptions,MagickFalse,option);
      quantize_info->measure_error=image_info->verbose;
    }
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2) A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.
The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static inline void AssociateAlphaPixel(const Image *image, const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (GetPixelAlpha(image,pixel) == OpaqueAlpha)) { alpha_pixel->red=(double) GetPixelRed(image,pixel); alpha_pixel->green=(double) GetPixelGreen(image,pixel); alpha_pixel->blue=(double) GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); return; } alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel)); alpha_pixel->red=alpha*GetPixelRed(image,pixel); alpha_pixel->green=alpha*GetPixelGreen(image,pixel); alpha_pixel->blue=alpha*GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); } static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info, const PixelInfo *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->alpha == OpaqueAlpha)) { alpha_pixel->red=(double) pixel->red; alpha_pixel->green=(double) pixel->green; alpha_pixel->blue=(double) pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; return; } alpha=(double) (QuantumScale*pixel->alpha); alpha_pixel->red=alpha*pixel->red; alpha_pixel->green=alpha*pixel->green; alpha_pixel->blue=alpha*pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 | 
((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3; return(id); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define AssignImageTag "Assign/Image" ssize_t y; /* Allocate image colormap. */ if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace, exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename);; image->colors=0; cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); (void) DefineImageColormap(image,cube_info,cube_info->root); /* Create a reduced color image. 
*/ if (cube_info->quantize_info->dither_method != NoDitherMethod) (void) DitherImage(image,cube_info,exception); else { CacheView *image_view; MagickBooleanType status; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; register Quantum *magick_restrict q; register ssize_t x; ssize_t count; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; register const NodeInfo *node_info; register ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. */ for (count=1; (x+count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,q,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. 
*/ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum( image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum( image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum( image->colormap[index].blue),q); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum( image->colormap[index].alpha),q); } q+=GetPixelChannels(image); } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AssignImageColors) #endif proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image,exception); if ((cube_info->quantize_info->number_colors == 2) && (cube_info->quantize_info->colorspace == GRAYColorspace)) { double intensity; /* Monochrome image. 
*/ intensity=0.0; if ((image->colors > 1) && (GetPixelInfoLuma(image->colormap+0) > GetPixelInfoLuma(image->colormap+1))) intensity=(double) QuantumRange; image->colormap[0].red=intensity; image->colormap[0].green=intensity; image->colormap[0].blue=intensity; if (image->colors > 1) { image->colormap[1].red=(double) QuantumRange-intensity; image->colormap[1].green=(double) QuantumRange-intensity; image->colormap[1].blue=(double) QuantumRange-intensity; } } (void) SyncImage(image,exception); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClassifyImageColors() begins by initializing a color description tree % of sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color % description tree in the storage_class phase for realistic values of % Cmax. If colors components in the input image are quantized to k-bit % precision, so that Cmax= 2k-1, the tree would need k levels below the % root node to allow representing each possible input color in a leaf. % This becomes prohibitive because the tree's total number of nodes is % 1 + sum(i=1,k,8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. 
At each level of the tree it % identifies the single node which represents a cube in RGB space % containing It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. % */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->alpha_trait == BlendPixelTrait ? MagickTrue : MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && (cube_info->quantize_info->colorspace == GRAYColorspace)) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; double bisect; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. 
*/ SetAssociatedAlpha(image,cube_info); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace,exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); midpoint.red=(double) QuantumRange/2.0; midpoint.green=(double) QuantumRange/2.0; midpoint.blue=(double) QuantumRange/2.0; midpoint.alpha=(double) QuantumRange/2.0; error.alpha=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. 
*/ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance)) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel(OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too 
large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != MagickFalse) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
*/ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel(OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. 
% */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
    8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        pixel;

      register double
        alpha,
        beta,
        distance;

      register DoublePixelPacket
        *magick_restrict q;

      register PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest": accumulate an alpha-weighted
        squared distance channel by channel, bailing out as soon as the
        running distance exceeds the best found so far.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(double) (QuantumScale*p->alpha);
          beta=(double) (QuantumScale*q->alpha);
        }
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      /*
                        NOTE(review): the alpha delta is unweighted here,
                        unlike the color channels above -- presumably
                        intentional, but worth confirming.
                      */
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize to the image's current color count; duplicate and unused
    entries collapse as a side effect.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.  DefineImageColormap() returns the
%  number of colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      /* alpha here is the reciprocal of the pixel count (the mean divisor). */
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Un-premultiply the color channels by the mean alpha, and
                remember the most-transparent entry for later use.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage: walk the node-bank list, freeing each
    bank and its node array, then the dither cache and the cube itself.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /* Invalidate the signature so use-after-free trips the assert above. */
  quantize_info->signature=(~MagickCoreSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D i t h e r I m a g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Free the per-thread pair of scanline error buffers.
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (DoublePixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate, per worker thread, storage for two rows of `count' error pixels
  (current and previous scanline); returns NULL on allocation failure.
*/
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* The factor 2 packs both scanlines into one contiguous allocation. */
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Map a pixel color to its slot in the dither cache by packing the high
  (8-CacheShift) bits of each channel into one index.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}

static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    **pixels;
  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Thread-private cube copy; current/previous error rows are selected by
      row parity, and v flips the scan direction on odd rows (serpentine).
    */
    cube=(*cube_info);
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /*
        Gather diffused error with the classic 7/16, 1/16, 5/16, 3/16
        Floyd-Steinberg weights.
      */
      if (x > 0)
        {
          pixel.red+=7*current[u-v].red/16;
          pixel.green+=7*current[u-v].green/16;
          pixel.blue+=7*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5*previous[u].red/16;
          pixel.green+=5*previous[u].green/16;
          pixel.blue+=5*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3*previous[u-v].red/16;
              pixel.green+=3*previous[u-v].green/16;
              pixel.blue+=3*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  /*
    NOTE(review): status tracks per-row failures above yet MagickTrue is
    returned unconditionally -- confirm whether failures should propagate.
  */
  return(MagickTrue);
}

static MagickBooleanType
  RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
    ExceptionInfo *);

/*
  Recursively trace a Hilbert curve of the given order, dithering one pixel
  per step via RiemersmaDither(); `direction' selects the curve orientation.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  if (level == 1)
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}

/*
  Dither the single pixel at the cube's current (x,y) position against the
  colormap, then step the position one pixel in `direction'.  Positions
  outside the image are skipped (the Hilbert curve overshoots the image).
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue.
      */
      (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  /* Fall back to Floyd-Steinberg unless Riemersma dither was requested. */
  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Choose the smallest Hilbert-curve order whose 2^depth x 2^depth grid
    covers the larger image dimension.
  */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
       image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /* Final call dithers the last pixel without advancing the position. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t C u b e I n f o                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initialize the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or
%      one tells Quantize to choose a optimal tree depth of
%      Log4(number_colors).  A tree of this depth generally allows the best
%      representation of the reference image with the least amount of memory
%      and the fastest computational speed.  In some cases, such as an image
%      with low color dispersion (a few number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  double
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
  /* Clamp the requested tree depth to [2, MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources: one cache slot for every possible
    CacheOffset() index (4 channels x (8-CacheShift) bits each).
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache.
  */
  (void) ResetMagickMemory(cube_info->cache,(-1),sizeof(*cube_info->cache)*
    length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  /* Fold any rounding residue into the oldest weight so the sum is 1.0. */
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t N o d e I n f o                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes: nodes are carved from banks of
        NodesInAList entries so individual nodes are never freed alone.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  /* Only PseudoClass (colormapped) images have a quantization error. */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      /* Per-channel alpha-weighted distance between pixel and its entry. */
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /* Zero everything, then set the library defaults. */
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P o s t e r i z e I m a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const DitherMethod dither_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low
%      values (2, 3, or 4) have the most visible effect.
%
%    o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
%      RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag  "Posterize/Image"
/*
  Quantize one channel value to `levels' evenly spaced steps over the quantum
  range.  NOTE(review): levels == 0 would wrap (levels-1) as size_t before the
  MagickMax clamp applies — callers are expected to pass levels >= 1.
*/
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    For a palette image, posterize the colormap entries directly (the pixel
    indexes are untouched here; QuantizeImage below finishes the job).
    NOTE(review): progress/status appear in this pragma's shared clause but
    are neither read nor written inside this loop; they are initialized just
    before the second loop below.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(progress,status) \
      magick_threads(image,image,1,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image: quantize each updatable channel of every pixel.  Black is
    only touched for CMYK images; alpha only when the image blends.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the shared progress counter across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImage)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Reduce to at most levels^3 colors so the colormap reflects the posterized
    palette.  NOTE(review): (ssize_t) levels*levels*levels can overflow for
    very large `levels'; MagickMin then clamps to MaxColormapSize+1.
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e C h i l d                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.  With alpha association the tree is 16-ary,
    otherwise 8-ary.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e L e v e l                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    count;

  /*
    Descend into every populated child first, then collapse this node into
    its parent if it sits exactly at the cube's pruning depth.
  */
  count=cube_info->associate_alpha != MagickFalse ? 16UL : 8UL;
  for (id=0; id < (ssize_t) count; id++)
  {
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[id]);
  }
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e T o C u b e D e p t h                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    count;

  /*
    Same traversal as PruneLevel(), but collapses every node strictly below
    the configured depth rather than only the bottom level.
  */
  count=cube_info->associate_alpha != MagickFalse ? 16UL : 8UL;
  for (id=0; id < (ssize_t) count; id++)
  {
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[id]);
  }
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Q u a n t i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested color count to [1..MaxColormapSize] (0 means "use the
    maximum").
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    Opaque grayscale images take a fast path via SetGrayscaleImage().
  */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  /*
    A palette image that already fits in the budget needs no quantization —
    at most a colorspace transform.
  */
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    {
      if ((quantize_info->colorspace != UndefinedColorspace) &&
          (quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace(image,quantize_info->colorspace,
          exception);
      return(MagickTrue);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* Dithering hides quantization error, so a shallower tree suffices. */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      /* Grayscale content needs full depth to keep the tonal ramp smooth. */
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image if it contains more than the
        maximum, otherwise we can disable dithering to improve the performance.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      else
        cube_info->quantize_info->dither_method=NoDitherMethod;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Q u a n t i z e I m a g e s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /*
        FIX: guard with (depth > 2) as QuantizeImage() does, so a tiny color
        budget cannot drive the tree depth down to a degenerate value.
      */
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  /*
    Pass 1: classify the colors of every image in the list into one shared
    cube.  The per-image progress monitor is silenced during classification;
    FIX: it is now restored *before* the early break so a failure does not
    leave the image with a NULL monitor.
  */
  number_images=GetImageListLength(images);
  image=images;
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
      NULL,image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    if (status == MagickFalse)
      break;
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Pass 2: reduce the shared cube to the color budget, then assign the
        resulting global colormap to every image in the sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        if (status == MagickFalse)
          break;
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u a n t i z e E r r o r F l a t t e n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeErrorFlatten() traverses the color cube and flattens the
%  quantization error into a sorted 1D array.  This accelerates the color
%  reduction process.
%
%  Contributed by Yoya.
% % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { register ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. 
At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. 
*/ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelInfo *color_1, *color_2; color_1=(PixelInfo *) x; color_2=(PixelInfo *) y; intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)- GetPixelInfoIntensity((const Image *) NULL,color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize* sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; 
intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) 
NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); }
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/cuda/vector_cudahost.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/threading.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include "score_updater.hpp" namespace LightGBM { using json11::Json; /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. 
 Will insert to the front of current boosting object
   * \param other
   */
  void MergeFrom(const Boosting* other) override {
    // NOTE(review): downcast of a sibling Boosting* — looks like static_cast
    // would be the conventional choice here; confirm the class hierarchy.
    auto other_gbdt = reinterpret_cast<const GBDT*>(other);
    // tmp move to other vector
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    // push model from other first (deep-copy each tree so both boosters
    // keep independent ownership)
    for (const auto& tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    // The merged-in trees count as the "initial" iterations of this model.
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // push model in current object
    for (const auto& tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  // Randomly permute the iterations in [start_iter, end_iter) (end_iter <= 0
  // means "through the last iteration").  Trees belonging to one iteration
  // move together; the fixed seed (17) makes the shuffle deterministic.
  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    Random tmp_rand(17);
    // Fisher-Yates shuffle restricted to the requested iteration window.
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    // Rebuild the model list in permuted order, deep-copying each tree.
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }

  /*!
* \brief Reset the training data
   * \param train_data New Training data
   * \param objective_function Training objective function
   * \param training_metrics Training metrics
   */
  void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
                         const std::vector<const Metric*>& training_metrics) override;

  /*!
   * \brief Reset Boosting Config
   * \param gbdt_config Config for boosting
   */
  void ResetConfig(const Config* gbdt_config) override;

  /*!
   * \brief Adding a validation dataset
   * \param valid_data Validation dataset
   * \param valid_metrics Metrics for validation dataset
   */
  void AddValidDataset(const Dataset* valid_data,
                       const std::vector<const Metric*>& valid_metrics) override;

  /*!
   * \brief Perform a full training procedure
   * \param snapshot_freq frequency of snapshots
   * \param model_output_path path of model file
   */
  void Train(int snapshot_freq, const std::string& model_output_path) override;

  void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;

  /*!
   * \brief Training logic
   * \param gradients nullptr for using default objective, otherwise use self-defined boosting
   * \param hessians nullptr for using default objective, otherwise use self-defined boosting
   * \return True if cannot train any more
   */
  bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;

  /*!
   * \brief Rollback one iteration
   */
  void RollbackOneIter() override;

  /*!
   * \brief Get current iteration
   */
  int GetCurrentIteration() const override {
    return static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }

  /*!
   * \brief Can use early stopping for prediction or not
   * \return True if cannot use early stopping for prediction
   */
  bool NeedAccuratePrediction() const override {
    if (objective_function_ == nullptr) {
      return true;
    } else {
      return objective_function_->NeedAccuratePrediction();
    }
  }

  /*!
   * \brief Get evaluation result at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \return evaluation result
   */
  std::vector<double> GetEvalAt(int data_idx) const override;

  /*!
   * \brief Get current training score
   * \param out_len length of returned score
   * \return training score
   */
  const double* GetTrainingScore(int64_t* out_len) override;

  /*!
   * \brief Get size of prediction at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \return The size of prediction
   */
  int64_t GetNumPredictAt(int data_idx) const override {
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return num_data * num_class_;
  }

  /*!
   * \brief Get prediction result at data_idx data
   * \param data_idx 0: training data, 1: 1st validation data
   * \param out_result used to store prediction result, should allocate memory before call this function
   * \param out_len length of returned score
   */
  void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;

  /*!
* \brief Get number of prediction for one data
   * \param start_iteration Start index of the iteration to predict
   * \param num_iteration number of used iterations
   * \param is_pred_leaf True if predicting leaf index
   * \param is_pred_contrib True if predicting feature contribution
   * \return number of prediction
   */
  inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf,
                              bool is_pred_contrib) const override {
    int num_pred_in_one_row = num_class_;
    if (is_pred_leaf) {
      int max_iteration = GetCurrentIteration();
      // Clamp the requested start iteration into [0, max_iteration].
      start_iteration = std::max(start_iteration, 0);
      start_iteration = std::min(start_iteration, max_iteration);
      if (num_iteration > 0) {
        num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
      } else {
        num_pred_in_one_row *= (max_iteration - start_iteration);
      }
    } else if (is_pred_contrib) {
      // One contribution per feature plus the baseline term:
      // +1 for 0-based indexing, +1 for baseline
      num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);
    }
    return num_pred_in_one_row;
  }

  void PredictRaw(const double* features, double* output,
                  const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
                       const PredictionEarlyStopInstance* early_stop) const override;

  void Predict(const double* features, double* output,
               const PredictionEarlyStopInstance* earlyStop) const override;

  void PredictByMap(const std::unordered_map<int, double>& features, double* output,
                    const PredictionEarlyStopInstance* early_stop) const override;

  void PredictLeafIndex(const double* features, double* output) const override;

  void PredictLeafIndexByMap(const std::unordered_map<int, double>& features,
                             double* output) const override;

  void PredictContrib(const double* features, double* output) const override;

  void PredictContribByMap(const std::unordered_map<int, double>& features,
                           std::vector<std::unordered_map<int, double>>* output) const override;

  /*!
   * \brief Dump model to json format string
   * \param start_iteration The model will be saved start from
   * \param num_iteration Number of iterations that want to dump, -1 means dump all
   * \param feature_importance_type Type of feature importance, 0: split, 1: gain
   * \return Json format string of model
   */
  std::string DumpModel(int start_iteration, int num_iteration,
                        int feature_importance_type) const override;

  /*!
   * \brief Translate model to if-else statement
   * \param num_iteration Number of iterations that want to translate, -1 means translate all
   * \return if-else format codes of model
   */
  std::string ModelToIfElse(int num_iteration) const override;

  /*!
   * \brief Translate model to if-else statement
   * \param num_iteration Number of iterations that want to translate, -1 means translate all
   * \param filename Filename that want to save to
   * \return is_finish Is training finished or not
   */
  bool SaveModelToIfElse(int num_iteration, const char* filename) const override;

  /*!
   * \brief Save model to file
   * \param start_iteration The model will be saved start from
   * \param num_iterations Number of model that want to save, -1 means save all
   * \param feature_importance_type Type of feature importance, 0: split, 1: gain
   * \param filename Filename that want to save to
   * \return is_finish Is training finished or not
   */
  bool SaveModelToFile(int start_iteration, int num_iterations, int feature_importance_type,
                       const char* filename) const override;

  /*!
   * \brief Save model to string
   * \param start_iteration The model will be saved start from
   * \param num_iterations Number of model that want to save, -1 means save all
   * \param feature_importance_type Type of feature importance, 0: split, 1: gain
   * \return Non-empty string if succeeded
   */
  std::string SaveModelToString(int start_iteration, int num_iterations,
                                int feature_importance_type) const override;

  /*!
   * \brief Restore from a serialized buffer
   */
  bool LoadModelFromString(const char* buffer, size_t len) override;

  /*!
   * \brief Calculate feature importances
   * \param num_iteration Number of model that want to use for feature importance, -1 means use all
   * \param importance_type: 0 for split, 1 for gain
   * \return vector of feature_importance
   */
  std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;

  /*!
   * \brief Calculate upper bound value
   * \return upper bound value
   */
  double GetUpperBoundValue() const override;

  /*!
   * \brief Calculate lower bound value
   * \return lower bound value
   */
  double GetLowerBoundValue() const override;

  /*!
   * \brief Get max feature index of this model
   * \return Max feature index of this model
   */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }

  /*!
   * \brief Get feature names of this model
   * \return Feature names of this model
   */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

  /*!
   * \brief Get index of label column
   * \return index of label column
   */
  inline int LabelIdx() const override { return label_idx_; }

  /*!
   * \brief Get number of weak sub-models
   * \return Number of weak sub-models
   */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

  /*!
   * \brief Get number of tree per iteration
   * \return number of tree per iteration
   */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

  /*!
* \brief Get number of classes
   * \return Number of classes
   */
  inline int NumberOfClasses() const override { return num_class_; }

  inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // Clamp the requested start iteration into the valid range.
    start_iteration = std::max(start_iteration, 0);
    start_iteration = std::min(start_iteration, num_iteration_for_pred_);
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
    } else {
      num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
    }
    start_iteration_for_pred_ = start_iteration;
    if (is_pred_contrib) {
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }

  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }

  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }

  /*!
   * \brief Get Type name of this boosting object
   */
  const char* SubModelName() const override { return "tree"; }

  bool IsLinear() const override { return linear_tree_; }

  /*! \brief Nesterov schedule */
  inline double NesterovSchedule(int iter, int momentum_schedule_version = 0,
                                 double nesterov_acc_rate = 0.5,
                                 int momentum_offset = 0) const {
    if (iter < momentum_offset) {
      return(0.);
    } else {
      if (momentum_schedule_version == 0) {
        return(nesterov_acc_rate);
      } else if (momentum_schedule_version == 1) {
        return(1. - (3. / (6. + iter)));
      } else {
        return(0.);
      }
    }
  }

 protected:
  virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
    if (objective_function != nullptr) {
      return objective_function->IsConstantHessian();
    } else {
      return false;
    }
  }

  /*!
   * \brief Print eval result and check early stopping
   */
  virtual bool EvalAndCheckEarlyStopping();

  /*!
   * \brief reset config for bagging
   */
  void ResetBaggingConfig(const Config* config, bool is_change_dataset);

  /*!
   * \brief Implement bagging logic
   * \param iter Current iteration
   */
  virtual void Bagging(int iter);

  virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer);

  data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer);

  /*!
   * \brief calculate the object function
   */
  virtual void Boosting();

  /*!
   * \brief updating score after tree was trained
   * \param tree Trained tree of this iteration
   * \param cur_tree_id Current tree for multiclass training
   */
  virtual void UpdateScore(const Tree* tree, const int cur_tree_id);

  /*!
   * \brief eval results for one metric
   */
  virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;

  /*!
   * \brief Print metric result of current iteration
   * \param iter Current iteration
   * \return best_msg if met early_stopping
   */
  std::string OutputMetric(int iter);

  double BoostFromAverage(int class_id, bool update_scorer);

  /*! \brief current iteration */
  int iter_;
  /*! \brief Pointer to training data */
  const Dataset* train_data_;
  /*! \brief Config of gbdt */
  std::unique_ptr<Config> config_;
  /*! \brief Tree learner, will use this class to learn trees */
  std::unique_ptr<TreeLearner> tree_learner_;
  /*! \brief Objective function */
  const ObjectiveFunction* objective_function_;
  /*! \brief Store and update training data's score */
  std::unique_ptr<ScoreUpdater> train_score_updater_;
  /*! \brief Metrics for training data */
  std::vector<const Metric*> training_metrics_;
  /*!
\brief Store and update validation data's scores */
  std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
  /*! \brief Metric for validation data */
  std::vector<std::vector<const Metric*>> valid_metrics_;
  /*! \brief Number of rounds for early stopping */
  int early_stopping_round_;
  /*! \brief Only use first metric for early stopping */
  bool es_first_metric_only_;
  /*! \brief Best iteration(s) for early stopping */
  std::vector<std::vector<int>> best_iter_;
  /*! \brief Best score(s) for early stopping */
  std::vector<std::vector<double>> best_score_;
  /*! \brief output message of best iteration */
  std::vector<std::vector<std::string>> best_msg_;
  /*! \brief Trained models(trees) */
  std::vector<std::unique_ptr<Tree>> models_;
  /*! \brief Max feature index of training data*/
  int max_feature_idx_;

#ifdef USE_CUDA
  /*! \brief First order derivative of training data */
  std::vector<score_t, CHAllocator<score_t>> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
  /*! \brief First order derivative of training data */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
  /*! \brief Second order derivative of training data */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif

  /*! \brief Store the indices of in-bag data */
  std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
  /*! \brief Number of in-bag data */
  data_size_t bag_data_cnt_;
  /*! \brief Number of training data */
  data_size_t num_data_;
  /*! \brief Number of trees per iterations */
  int num_tree_per_iteration_;
  /*! \brief Number of class */
  int num_class_;
  /*! \brief Index of label column */
  data_size_t label_idx_;
  /*! \brief number of used model */
  int num_iteration_for_pred_;
  /*! \brief Start iteration of used model */
  int start_iteration_for_pred_;
  /*! \brief Shrinkage rate for one iteration */
  double shrinkage_rate_;
  /*! \brief Number of loaded initial models */
  int num_init_iteration_;
  /*! \brief Feature names */
  std::vector<std::string> feature_names_;
  std::vector<std::string> feature_infos_;
  std::unique_ptr<Dataset> tmp_subset_;
  bool is_use_subset_;
  std::vector<bool> class_need_train_;
  bool is_constant_hessian_;
  std::unique_ptr<ObjectiveFunction> loaded_objective_;
  bool average_output_;
  bool need_re_bagging_;
  bool balanced_bagging_;
  std::string loaded_parameter_;
  std::vector<int8_t> monotone_constraints_;
  const int bagging_rand_block_ = 1024;
  std::vector<Random> bagging_rands_;
  ParallelPartitionRunner<data_size_t, false> bagging_runner_;
  Json forced_splits_json_;
  bool linear_tree_;

  /*! \brief If true, Nesterov acceleration is used for boosting */
  bool use_nesterov_acc_ = false;
  /*! \brief Acceleration rate for momentum step in Nesterov step */
  double nesterov_acc_rate_ = 0.5;
  /*! \brief Choose the acceleration rate schedule */
  int momentum_schedule_version_ = 0;
  /*! \brief Acceleration rate is zero before the offset number */
  int momentum_offset_ = 0;
  /*! \brief If true, a Newton update step is done for the tree leaves after the gradient step
   *  (only relevant for GPBoost algorithm, i.e. if objective_function_->HasGPModel()==true) */
  bool leaves_newton_update_ = false;
};

}  // namespace LightGBM
#endif  // LIGHTGBM_BOOSTING_GBDT_H_
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2013 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. // The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types </li> // <li> \ref vector_operations </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types </li> // <li> \ref matrix_operations </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_columns </li> // </ul> // </li> // <li> \ref arithmetic_operations // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // 
<li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication // <ul> // <li> \ref schur_product </li> // <li> \ref matrix_product </li> // </ul> // </li> // </ul> // </li> // <li> \ref custom_operations </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref openmp_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref configuration_files </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref custom_data_types </li> // <li> \ref error_reporting_customization </li> // <li> \ref intra_statement_optimization </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. 
// // // \n \section requirements Requirements // <hr> // // For maximum performance the \b Blaze library expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Additionally, for computing the determinant of a dense matrix, for the decomposition of dense // matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular // values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either // of these features is used it is necessary to link the LAPACK library to the final executable. // If no LAPACK library is available the use of these features will result in a linker error. // // Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this // case the Boost library is required to be installed on your system. It is recommended to use the // newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If // you don't have Boost installed on your system, you can download it for free from // <a href="http://www.boost.org">www.boost.org</a>. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_cmake Installation via CMake // // The first step is the installation of the \b Blaze header files. The most convenient way // to do this is via <a href="https://cmake.org">CMake</a>. 
Linux and macOS users can use the // following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to // the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to // \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake. \code cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ sudo make install \endcode // Windows users can do the same via the cmake-gui. Alternatively, it is possible to include // \b Blaze by adding the following lines in any \c CMakeLists.txt file: \code find_package( blaze ) if( blaze_FOUND ) add_library( blaze_target INTERFACE ) target_link_libraries( blaze_target INTERFACE blaze::blaze ) endif() \endcode // \n \subsection step_1_installation_unix Manual Installation on Linux/macOS // // Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply // copied to a standard include directory (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). // Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Manual Installation on Windows // // Windows doesn't have a standard include directory. 
Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects // of \b Blaze can be adapted to specific requirements, environments and architectures. The most // convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt> // subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header // files can be customized manually. In both cases, however, the files are modified. If this is // not an option it is possible to configure \b Blaze via the command line (see the tutorial // section \ref configuration_files or the documentation in the configuration files). // // Since the default settings are reasonable for most systems this step can also be skipped. // However, in order to achieve maximum performance a customization of at least the following // configuration files is required: // // - <b><tt>./blaze/config/BLAS.h</tt></b>: Via this configuration file \b Blaze can be enabled // to use a third-party BLAS library for several basic linear algebra functions (such as for // instance dense matrix multiplications). In case no BLAS library is used, all linear algebra // functions use the default implementations of the \b Blaze library and therefore BLAS is not a // requirement for the compilation process. However, please note that performance may be limited. // - <b><tt>./blaze/config/CacheSize.h</tt></b>: This file contains the hardware specific cache // settings. 
\b Blaze uses this information to optimize its cache usage. For maximum performance // it is recommended to adapt these setting to a specific target architecture. // - <b><tt>./blaze/config/Thresholds.h</tt></b>: This file contains all thresholds for the // customization of the \b Blaze compute kernels. In order to tune the kernels for a specific // architecture and to maximize performance it can be necessary to adjust the thresholds, // especially for a parallel execution (see \ref shared_memory_parallelization). // // For an overview of other customization options and more details, please see the section // \ref configuration_files. // // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; // Instantiation of a static 3D column vector. The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. 
Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = 6 3 2 \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <blaze/Math.h> using namespace blaze; // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. 
Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode // The output of this program is \code y = 16 2 C = ( -1 -1 ) ( 0 -4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... 
Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. // // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector) // and one sparse vector type (\ref vector_types_compressed_vector). 
All vectors can be specified // as either column vectors or row vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_static_vector StaticVector // <hr> // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. 
It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of a 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The blaze::DynamicVector class template is the representation of an arbitrary sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. 
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_custom_vector CustomVector // <hr> // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. Thus in contrast to all other dense vector types a custom vector does not perform // any kind of memory allocation by itself, but it is provided with an existing array of elements // during construction. A custom vector can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomVector.h> \endcode // The type of the elements, the properties of the given array of elements and the transpose // flag of the vector can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool TF > class CustomVector; \endcode // - Type: specifies the type of the vector elements. blaze::CustomVector can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. 
// - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays typedef CustomVector<int,unaligned,unpadded,columnVector> UnalignedUnpadded; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays typedef CustomVector<float,unaligned,padded,columnVector> UnalignedPadded; UnalignedPadded b( new float[16], 9UL, 16UL, blaze::ArrayDelete() ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays typedef CustomVector<double,aligned,unpadded,rowVector> AlignedUnpadded; AlignedUnpadded c( blaze::allocate<double>( 7UL ), 7UL, blaze::Deallocate() ); // Definition of a managed custom row vector for aligned, padded 'complex<double>' arrays typedef CustomVector<complex<double>,aligned,padded,columnVector> AlignedPadded; AlignedPadded d( allocate< complex<double> >( 8UL ), 5UL, 8UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \n \subsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes with the price that the user of a custom // vector is responsible for the resource management. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom vector with size 3 and capacity 16 with aligned, padded and // externally managed integer array. Note that the std::unique_ptr must be guaranteed // to outlive the custom vector! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) ); CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL ); \endcode // \n \subsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; typedef CustomVector<int,unaligned,unpadded> CustomType; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affects vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \n \subsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). 
For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) ); CustomVector<int,aligned,unpadded> a( memory.get(), 5UL ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \n \subsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. // For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomVector<double,aligned,padded> CustomType; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) ); // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( memory1.get(), 3UL, 4UL ); CustomType b( memory2.get(), 3UL, 4UL ); CustomType c( memory3.get(), 3UL, 4UL ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. 
However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomVector<double,aligned,unpadded> CustomType; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) ); // Creating unpadded custom vector of size 3 CustomType a( memory1.get(), 3UL ); CustomType b( memory2.get(), 3UL ); CustomType c( memory3.get(), 3UL ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is greater or equal than the size and a multiple of the SIMD vector // width. In case of unaligned padded vectors the number of padding elements can be greater or // equal the number of padding elements of an aligned padded custom vector. In case the padding // is insufficient with respect to the available instruction set, a \c std::invalid_argument // exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! 
// // // \n \section vector_types_compressed_vector CompressedVector // <hr> // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. 
// - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. // All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. 
\code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. \endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. // All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array. If the vector is initialized from a dynamic array, the constructor expects the // actual size of the array as first argument, the array as second argument. In case of a static // array, the fixed size of the array is used: \code const unique_ptr<double[]> array1( new double[2] ); // ... 
Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense vector classes can be directly initialized by means of an initializer // list: \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; \endcode // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v16( v7 ); // Instantiation of the dense column vector v16 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v17( v9 ); // Instantiation of the dense row vector v17 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v18( v1 ); // Instantiation of the sparse column vector v18 // as copy of the dense column vector v1. CompressedVector<float,rowVector> v19( v12 ); // Instantiation of the sparse row vector v19 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v23( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v24( v10 ); // Compile time error: Size does not match! \endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. 
// // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. // For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense vector: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 4.0, -1.7, 8.6, -7.2 }; \endcode // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! \code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... 
Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note however, that in contrast to plain assignment // the size and the transpose flag of the vectors have to be equal in order to be able to perform // a compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ... Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... 
blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore, all // vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end(), and \c cend() functions to traverse the currently contained elements by iterators. // In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a // manipulation of the non-zero value, in case of a constant vector or in case \c cbegin() or // \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedVector; CompressedVector<int> v1( 10UL ); // ... Initialization of the vector // Traversing the vector by Iterator for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } // Traversing the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) { // ... } \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, that store all elements independent of their value and that // offer direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. The first // option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. The operator returns a // reference to the sparse vector element.\n // An alternative is the \c set() function: In case the element is not yet contained in the vector // the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. In contrast // to the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the vector it is inserted // with a value of 6. 
if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... \endcode // \n \section vector_operations_member_functions Member Functions // <hr> // // \subsection vector_operations_size .size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query to current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. 
Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater than or equal to // the size of the vector, in case of a sparse vector the capacity may even be less than // the size. \code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function \c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! \endcode // \n \subsection vector_operations_nonzeros .nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements. \code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. 
However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code typedef blaze::DynamicVector<int,rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v1( 10UL ); // Creating a dynamic vector of size 10 SubvectorType sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // chunk of memory is triggered. 
In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value! // // // \n \subsection vector_operations_shrinkToFit .shrinkToFit() // // The internal capacity of vectors with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers v1.resize( 10UL ); // Resize to 10, but the capacity is preserved v1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c size(). Please // also note that in case a reallocation occurs, all iterators (including \c end() iterators), all // pointers and references to elements of the vector are invalidated. // // // \n \section vector_operations_free_functions Free Functions // <hr> // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. 
the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. // // // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for not-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, rows, and columns) is in default state if all its elements are in default state. 
// For instance, in case the vector is instantiated for a built-in integral or floating point data // type, the function returns \c true in case all vector elements are 0 and \c false in case any // vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors the zero elements are also taken into account! // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single vector or multiple vectors. If // passed a single vector, the functions return the smallest and largest element of the given // dense or sparse vector, respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 }; min( a ); // Returns -5 max( a ); // Returns 7 \endcode // In case the vector currently has a size of 0, both functions return 0. Additionally, in case // a given sparse vector is not completely filled, the zero elements are taken into account. For // example, the following compressed vector has only two non-zero elements. However, the minimum // of this vector is 0: \code blaze::CompressedVector<int> b( 4UL, 2UL ); b[0] = 1; b[2] = 3; min( b ); // Returns 0 \endcode // If passed two or more dense vectors, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given vectors, respectively: \code blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 }; blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 }; min( a, c ); // Results in the vector ( -5, 1, -7, -4 ) max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 ) \endcode // Please note that sparse vectors can only be used in the unary \c min() and \c max() functions. 
// Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector min( a + c, c - d ); // Results in ( -10 -2 -7 0 ) max( a - c, c + d ); // Results in ( 0 4 14 6 ) \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. // For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a vector, respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); // Rounding up each element of the vector b = trunc( a ); // Truncating each element of the vector b = round( a ); // Rounding each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); conjugate( c ); 
// In-place conjugate operation. c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; typedef std::complex<double> cplx; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b; b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = pow( a, 1.2 ); // Computes the exponential value of each element \endcode // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // vector, respectively: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element b = exp2( a ); // Computes the base 2 exponential of each element b = exp10( a ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log2( a ); // Computes the binary logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_map map() / forEach() // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors. The unary \c map() function can be used to apply a custom operation // on each element of a dense or sparse vector. For instance, the following example demonstrates // a custom square root computation via a lambda: \code blaze::DynamicVector<double> a, b; b = map( a, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors. 
The following example demonstrates the merging of two vectors of double // precision values into a vector of double precision complex numbers: \code blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 }; blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 }; blaze::DynamicVector< complex<double> > cplx; // Creating the vector // ( ( 2.1, 0.3) ) // ( (-4.2, 1.4) ) // ( ( 1.0, 2.9) ) // ( ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used (even for binary custom operations), but the function might be deprecated in future // releases of \b Blaze. // // // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length() // and \c sqrLength() function can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! // // // \n \subsection vector_operations_vector_transpose trans() / transpose() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). 
A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_conjugate_transpose ctrans() / ctranspose() // // It is also possible to compute the conjugate transpose of a vector. This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given vector expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a dense and a sparse vector: \code using blaze::DynamicVector; using blaze::CompressedVector; blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... Resizing and initialization auto c = evaluate( a * b ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. 
Please note that if \c evaluate() is used in this // way, no temporary vector is created and no copy operation is performed. Instead, the result // is directly written to the target vector due to the return value optimization (RVO). However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code CompressedVector<double> d( a * b ); // No temporary & no copy operation DynamicVector<double> e( a * b ); // Temporary & copy operation d = evaluate( a * b ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicVector<double> a, b, c, d; d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector d = a + eval( b * c ); // No creation of a temporary vector \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // // \n \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. 
\code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. // // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref matrix_types_compressed_matrix). 
All matrices can either be // stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_static_matrix StaticMatrix // <hr> // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. 
It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, and the storage order of the matrix // can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of a 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. 
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extent of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns and the storage order of the // matrix can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class HybridMatrix; \endcode // - Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - M : specifies the maximum number of rows of the matrix. // - N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. 
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. 
// - AF  : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF  : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - SO  : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays typedef CustomMatrix<int,unaligned,unpadded,rowMajor> UnalignedUnpadded; std::vector<int> vec( 12UL ); UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays typedef CustomMatrix<float,unaligned,padded,columnMajor> UnalignedPadded; UnalignedPadded B( new float[40], 5UL, 6UL, 8UL, blaze::ArrayDelete() ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays typedef CustomMatrix<double,aligned,unpadded,rowMajor> AlignedUnpadded; AlignedUnpadded C( blaze::allocate<double>( 192UL ), 12UL, 13UL, 16UL, blaze::Deallocate() ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays typedef CustomMatrix<complex<double>,aligned,padded,columnMajor> AlignedPadded; AlignedPadded D( blaze::allocate<complex<double>>( 112UL ), 7UL, 14UL, 16UL, blaze::Deallocate() ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several // special characteristics.
All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \n \subsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes with the price that the user of a custom // matrix is responsible for the resource management. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr // must be guaranteed to outlive the custom matrix! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) ); CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL ); \endcode // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; typedef CustomMatrix<int,unaligned,unpadded> CustomType; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A(0,1) = 20; // Also modifies the std::vector CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned.
For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) ); CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned. In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \n \subsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; typedef CustomMatrix<double,aligned,padded> CustomType; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) ); // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( memory1.get(), 3UL, 3UL, 4UL ); CustomType B( memory2.get(), 3UL, 3UL, 4UL ); CustomType C( memory3.get(), 3UL, 3UL, 4UL ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible.
However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; typedef CustomMatrix<double,aligned,unpadded> CustomType; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) ); // Creating unpadded custom 3x3 matrix CustomType A( memory1.get(), 3UL, 3UL ); CustomType B( memory2.get(), 3UL, 3UL ); CustomType C( memory3.get(), 3UL, 3UL ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding element and use them // in all computations in order to achieve maximum performance. In case of an unpadded matrix // \b Blaze will ignore the elements with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater or equal the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown.
// // // \n \section matrix_types_compressed_matrix CompressedMatrix // <hr> // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_identity_matrix IdentityMatrix // <hr> // // The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary // sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/IdentityMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class IdentityMatrix; \endcode // - Type: specifies the type of the matrix elements. IdentityMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. 
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::IdentityMatrix is the perfect choice to represent an identity matrix: \code // Definition of a 3x3 integral row-major identity matrix blaze::IdentityMatrix<int> A( 3UL ); // Definition of a 6x6 single precision row-major identity matrix blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL ); // Definition of a double precision column-major identity matrix with 0 rows and columns blaze::IdentityMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. 
Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. 
In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a // dynamic or static array. If the matrix is initialized from a dynamic array, the constructor // expects the dimensions of values provided by the array as first and second argument, the // array as third argument. In case of a static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode // \n \subsection matrix_operations_initializer_list_construction // // In addition, all dense matrix classes can be directly initialized by means of an initializer // list: \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; \endcode // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix. \code StaticMatrix<int,5UL,4UL,rowMajor> M15( M6 ); // Instantiation of the dense row-major matrix M15 // as copy of the dense row-major matrix M6. 
DynamicMatrix<float,columnMajor> M16( M8 ); // Instantiation of the dense column-major matrix M16 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M17( M7 ); // Instantiation of the compressed column-major matrix // M17 as copy of the dense row-major matrix M7. CompressedMatrix<float,rowMajor> M18( M8 ); // Instantiation of the compressed row-major matrix // M18 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M19( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M20( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. 
All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense matrix: \code blaze::DynamicMatrix<double> M; M = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; \endcode // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. \code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ...
Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... 
Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix.
Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore, // all matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end() and \c cend() functions to traverse all contained elements by iterator. Note that // it is not possible to traverse all elements of the matrix, but that it is only possible to // traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and // \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of // a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> M1( 4UL, 6UL ); // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) { // ... } } for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) { // ... } } \endcode // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. The first possibility to add elements to a sparse matrix is the function call // operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element.\n // An alternative is the \c set() function: In case the element is not yet contained in the matrix // the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // However, insertion of elements can be better controlled via the \c insert() function. In // contrast to the function call operator and the \c set() function it emits an exception in case // the element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4.
if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row/column. Additionally, // the index of the new element must be larger than the index of the previous element in the // same row/column. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... \endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code // Setup of the compressed row-major matrix // // ( 0 1 0 2 0 ) // A = ( 0 0 0 0 0 ) // ( 3 0 0 0 0 ) // blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3 M1.finalize( 0 ); // Finalizing row 0 M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \note The \c finalize() function has to be explicitly called for each row or column, even // for empty ones! // \note Although \c append() does not allocate new memory, it still invalidates all iterators // returned by the \c end() functions! 
// // // \n \section matrix_operations_member_functions Member Functions // <hr> // // \subsection matrix_operations_rows .rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the free functions \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \n \subsection matrix_operations_capacity .capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. 
// Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater than or equal to the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! \endcode // \n \subsection matrix_operations_nonzeros .nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... 
Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. In contrast, the number or rows and columns // of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<int,rowMajor> M1; CompressedMatrix<int,columnMajor> M2( 3UL, 2UL ); // Adapting the number of rows and columns via the resize() function. The (optional) // third parameter specifies whether the existing elements should be preserved. Per // default, the existing elements are preserved. M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type // remain uninitialized, elements of class type are default // constructed. M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the // new elements are NOT initialized! M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved. M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost. 
\endcode // Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices) // on the matrix: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType M1( 10UL, 20UL ); // Creating a 10x20 matrix RowType row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view \endcode // When the internal capacity of a matrix is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \subsection matrix_operations_shrinkToFit .shrinkToFit() // // The internal capacity of matrices with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved M1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c rows() times // \c columns(). Please also note that in case a reallocation occurs, all iterators (including // \c end() iterators), all pointers and references to elements of this matrix are invalidated. 
// // // \n \section matrix_operations_free_functions Free Functions // <hr> // // \subsection matrix_operations_reset_clear reset() / clear() // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be reset: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and views_columns). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. clear( M1 ); // Resetting the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... 
} \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error. // // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if its size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! 
// // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform() function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix the zero elements are also taken into account! // // // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! 
// // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! 
// // // \n \subsection matrix_operations_min_max min() / max() // // The \c min() and the \c max() functions return the smallest and largest element of the given // dense or sparse matrix, respectively: \code using blaze::rowMajor; blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -5, 2, 7 }, { 4, 0, 1 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B{ { -5, 2, -7 }, { -4, 0, -1 } }; min( A ); // Returns -5 min( B ); // Returns -7 max( A ); // Returns 7 max( B ); // Returns 2 \endcode // In case the matrix currently has 0 rows or 0 columns, both functions return 0. Additionally, in // case a given sparse matrix is not completely filled, the zero elements are taken into account. // For example: the following compressed matrix has only 2 non-zero elements. However, the minimum // of this matrix is 0: \code blaze::CompressedMatrix<int> C( 2UL, 3UL ); C(0,0) = 1; C(0,2) = 3; min( C ); // Returns 0 \endcode // Also note that the \c min() and \c max() functions can be used to compute the smallest and // largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // \n \subsection matrix_operators_trace trace() // // The \c trace() function sums the diagonal elements of a square dense or sparse matrix: \code blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 } , { -4, -5, 6 } , { 7, -8, -9 } }; trace( A ); // Returns the sum of the diagonal elements, i.e. -15 \endcode // In case the given matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. 
// For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a matrix, respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); // Rounding up each element of the matrix B = trunc( A ); // Truncating each element of the matrix B = round( A ); // Rounding each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. 
C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; typedef std::complex<double> cplx; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
// // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = pow( A, 1.2 ); // Computes the exponential value of each element \endcode // \n \subsection matrix_operators_exp exp() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
// // // \n \subsection matrix_operators_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log2( A ); // Computes the binary logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! 
// // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_map map() / forEach() // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on matrices. The unary \c map() function can be used to apply a custom operation // on each element of a dense or sparse matrix. For instance, the following example demonstrates // a custom square root computation via a lambda: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense matrices. 
The following example demonstrates the merging of two matrices of double // precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used (even for binary custom operations), but the function might be deprecated in future // releases of \b Blaze. // // // \n \subsection matrix_operations_matrix_transpose trans() / transpose() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // Additionally, matrices can be transposed in-place via the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. 
M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_conjugate_transpose ctrans() / ctranspose() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. 
// // // \n \subsection matrix_operations_matrix_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given matrix expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a lower and a strictly lower dense // matrix: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::StrictlyLowerMatrix; LowerMatrix< DynamicMatrix<double> > A; StrictlyLowerMatrix< DynamicMatrix<double> > B; // ... Resizing and initialization auto C = evaluate( A * B ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary matrix is created and no copy operation is performed. Instead, the result // is directly written to the target matrix due to the return value optimization (RVO). However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation DynamicMatrix<double> F( A * B ); // Temporary & copy operation D = evaluate( A * B ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. 
This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicMatrix<double> A, B, C, D; D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix D = A + eval( B * C ); // No creation of a temporary matrix \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if the // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. 
// // // \n \subsection matrix_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_declaration_operations Declaration Operations // <hr> // // \subsection matrix_operations_declsym declsym() // // The \c declsym() operation can be used to explicitly declare any matrix or matrix expression // as symmetric: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declsym( A ); \endcode // Any matrix or matrix expression that has been declared as symmetric via \c declsym() will // gain all the benefits of a symmetric matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; DynamicMatrix<double> A, B, C; SymmetricMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isSymmetric( declsym( A ) ); // Will always return true without runtime effort S = declsym( A ); // Omit any runtime check for symmetry C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declsym() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-symmetric matrix or // matrix expression as symmetric via the \c declsym() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declherm declherm() // // The \c declherm() operation can be used to explicitly declare any matrix or matrix expression // as Hermitian: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization B = declherm( A ); \endcode // Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will // gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; DynamicMatrix<double> A, B, C; HermitianMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isHermitian( declherm( A ) ); // Will always return true without runtime effort S = declherm( A ); // Omit any runtime check for Hermitian symmetry C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declherm() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-Hermitian matrix or // matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decllow decllow() // // The \c decllow() operation can be used to explicitly declare any matrix or matrix expression // as lower triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decllow( A ); \endcode // Any matrix or matrix expression that has been declared as lower triangular via \c decllow() // will gain all the benefits of a lower triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; DynamicMatrix<double> A, B, C; LowerMatrix< DynamicMatrix<double> > L; // ... 
Resizing and initialization isLower( decllow( A ) ); // Will always return true without runtime effort L = decllow( A ); // Omit any runtime check for A being a lower matrix C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decllow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-lower matrix or // matrix expression as lower triangular via the \c decllow() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declupp declupp() // // The \c declupp() operation can be used to explicitly declare any matrix or matrix expression // as upper triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declupp( A ); \endcode // Any matrix or matrix expression that has been declared as upper triangular via \c declupp() // will gain all the benefits of a upper triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UpperMatrix; DynamicMatrix<double> A, B, C; UpperMatrix< DynamicMatrix<double> > U; // ... Resizing and initialization isUpper( declupp( A ) ); // Will always return true without runtime effort U = declupp( A ); // Omit any runtime check for A being a upper matrix C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. 
Declaring a non-upper matrix or // matrix expression as upper triangular via the \c declupp() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decldiag decldiag() // // The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression // as diagonal: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decldiag( A ); \endcode // Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will // gain all the benefits of a diagonal matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... Resizing and initialization isDiagonal( decldiag( A ) ); // Will always return true without runtime effort D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decldiag() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-diagonal matrix // or matrix expression as diagonal via the \c decldiag() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declid declid() // // The \c declid() operation can be used to explicitly declare any matrix or matrix expression // as identity matrix: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization B = declid( A ); \endcode // Any matrix or matrix expression that has been declared as identity matrix via \c declid() will // gain all the benefits of an identity matrix, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... Resizing and initialization isIdentity( declid( A ) ); // Will always return true without runtime effort D = declid( A ); // Omit any runtime check for A being a diagonal matrix C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an // identity matrix, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declid() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-identity matrix // or matrix expression as identity matrix via the \c declid() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. 
For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution. // // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // when calling the \c invert() function: \code using blaze::asGeneral; using blaze::asSymmetric; using blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; invert<asGeneral> ( A ); // In-place inversion of a general matrix invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix invert<asLower> ( A ); // In-place inversion of a lower triangular matrix invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix invert<asUpper> ( A ); // In-place inversion of a upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion of a upper unitriangular matrix invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite 
matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite matrices. Please note that it is in the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! inv( A )(1,2); // Compilation error: It is not possible to access individual elements! 
\endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. 
For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. 
For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors // <hr> // // The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions: \code namespace blaze { template< typename MT, bool SO, typename VT, bool TF > void eigen( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& V ); } // namespace blaze \endcode // The first function computes only the eigenvalues of the given \a n-by-\a n matrix, the second // function additionally computes the eigenvectors. The eigenvalues are returned in the given vector // \a w and the eigenvectors are returned in the given matrix \a V, which are both resized to the // correct dimensions (if possible and necessary). // // Depending on the given matrix type, the resulting eigenvalues are either of floating point // or complex type: In case the given matrix is either a compile time symmetric matrix with // floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues // will be of floating point type and therefore the elements of the given eigenvalue vector are // expected to be of floating point type. In all other cases they are expected to be of complex // type. 
Please note that for complex eigenvalues no order of eigenvalues can be assumed, except // that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having // the positive imaginary part first. // // In case \a A is a row-major matrix, the left eigenvectors are returned in the rows of \a V, // in case \a A is a column-major matrix, the right eigenvectors are returned in the columns of // \a V. In case the given matrix is a compile time symmetric matrix with floating point elements, // the resulting eigenvectors will be of floating point type and therefore the elements of the // given eigenvector matrix are expected to be of floating point type. In all other cases they // are expected to be of complex type. // // The following examples give an impression of the computation of eigenvalues and eigenvectors // for a general, a symmetric, and an Hermitian matrix: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A // ... Initialization DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The symmetric matrix A // ... 
Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // The Hermitian matrix A // ... Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // In all failure cases an exception is thrown. // // \note All \c eigen() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of // LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available // and linked to the executable. Otherwise a linker error will be created. 
// // // \n \section matrix_operations_singularvalues Singular Values/Singular Vectors // <hr> // // The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd() // functions: \code namespace blaze { template< typename MT, bool SO, typename VT, bool TF > void svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3 > void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3, typename ST > size_t svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The first and third function compute only singular values of the given general \a m-by-\a n // matrix, the second and fourth function additionally compute singular vectors. The resulting // singular values are returned in the given vector \a s, the left singular vectors are returned // in the given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s, // \a U, and \a V are resized to the correct dimensions (if possible and necessary). // // The third and fourth function allow for the specification of a subset of singular values and/or // vectors. The number of singular values and vectors to be computed is specified by the lower // bound \a low and the upper bound \a upp, which either form an integral or a floating point // range. // // In case \a low and \a upp are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. 
The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // In all failure cases an exception is thrown. // // Examples: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... 
Initialization DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors svd( A, U, s, V ); \endcode \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... Initialization DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors svd( A, U, s, V, 0, 2 ); \endcode // \note All \c svd() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the singular values and/or singular vectors of a dense matrix by // means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is // available and linked to the executable. Otherwise a linker error will be created. // // // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. 
Due to this adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. // // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression on the use of adaptors. The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... 
Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. 
// // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$). It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. 
Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type.
However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomSymmetric; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array CustomSymmetric B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix.
The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix matrix can participate in numerical operations in any way any other dense // or sparse matrix can participate. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime 
check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix // to be assigned is not symmetric at compile time, a runtime check is performed. // // // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 3x3 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead. The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance.
// // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adapter is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. 
Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef SymmetricMatrix< DynamicMatrix<double,columnMajor> > DynamicSymmetric; DynamicSymmetric A( 10UL ); Row<DynamicSymmetric> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b 
Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of a Hermitian matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view.
// // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements). // For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian). \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. 
// It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! 
// // This means that the following properties of a Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1, 0) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1, 0) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 )
}, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a Hermitian sparse matrix. In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent a Hermitian matrix, a \c std::invalid_argument
exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomHermitian; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array CustomHermitian B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved.
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; typedef std::complex<double> cplx; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix. The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; typedef complex<float> cplx; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes 
runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a Hermitian matrix. In case the matrix // to be assigned is not Hermitian at compile time, a runtime check is performed. // // // \n \section adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using a Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ...
Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage. // // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. 
// // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::Row; using blaze::rowMajor; using blaze::columnMajor; typedef HermitianMatrix< DynamicMatrix<double,columnMajor> > DynamicHermitian; DynamicHermitian A( 10UL ); // Both Hermitian and symmetric Row<DynamicHermitian> row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a Hermitian matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< complex<double> > > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // // 
Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/LowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class LowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types.
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix is depending on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix depends on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/DiagonalMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class DiagonalMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. 
is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square! 
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix. // Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. 
\code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; typedef LowerMatrix< CompressedMatrix<double,rowMajor> > CompressedLower; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! 
\endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; typedef LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomLower; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array CustomLower B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. \code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. 
Since only lower/upper and potentially diagonal elements may be modified // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! // Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix this initialization is important since otherwise the lower/upper matrix property // of dense lower matrices would not be guaranteed: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements! // // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively. Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, storing also // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. 
In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices typedef blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> > SmallDiagonalMatrix; // Recommendation 2: use sparse matrices for large diagonal matrices typedef blaze::DiagonalMatrix< blaze::CompressedMatrix<float> > LargeDiagonalMatrix; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! // // Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on an unilower matrix A /= 2; // Compilation error; Scale operation is not available on an unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can participate. It can also be combined with any other dense or sparse // vector or matrix. 
The following code example gives an impression of the use of blaze::LowerMatrix // within arithmetic operations: \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a triangular matrix. In case the // matrix to be assigned does not satisfy the invariants of the triangular matrix at compile // time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular // and strictly triangular matrix types can be used in the same way, but may pose some additional // restrictions (see the according class documentations). 
// // // \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices // <hr> // // It is also possible to use triangular block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 upper block matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. 
// // // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead. The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. // // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. 
// // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the // runtime of the multiplication. Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e. for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. 
In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible; if it is not, an exception is thrown. In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in an upper matrix; no runtime overhead C = A - B; // Results in an upper matrix; no runtime overhead C = A * B; // Results in an upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* //**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row or column of a matrix. As such, views act as a reference to a specific part of a vector // or matrix. This reference is valid and can be used in every way as any other vector or matrix // can be used as long as the referenced vector or matrix is not resized or entirely destroyed. // Views also act as an alias to the elements of the vector or matrix: Changes made to the elements // (e.g. 
modifying values, inserting or erasing elements) via the view are immediately visible in // the vector or matrix and changes made via the vector or matrix are immediately visible in the // view. // // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_columns // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; // Setup of the 3x5 row-major matrix // // ( 1 0 -2 3 0 ) // ( 0 2 5 -1 -1 ) // ( 1 0 0 2 1 ) // DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector // // ( 18 19 ) // StaticVector<int,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g. modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. 
// // // \n \section views_subvectors_class The Subvector Class Template // <hr> // // The blaze::Subvector class template represents a view on a specific subvector of a dense or // sparse vector primitive. It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The type of the vector is specified via two template parameters: \code template< typename VT, bool AF > class Subvector; \endcode // - \c VT: specifies the type of the vector primitive. Subvector can be used with every vector // primitive or view, but does not work with any vector expression type. // - \c AF: the alignment flag specifies whether the subvector is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. This view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. A subvector created from a row // vector can be used as any other row vector, a subvector created from a column vector can be // used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector // of a vector primitive on the left-hand side of an assignment or to grant read-access to a // specific subvector of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<int,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2; SparseVectorType s1, s2; // ... 
Resizing and initialization // Creating a view on the first ten elements of the dense vector d1 blaze::Subvector<DenseVectorType> dsv = subvector( d1, 0UL, 10UL ); // Creating a view on the second ten elements of the sparse vector s1 blaze::Subvector<SparseVectorType> ssv = subvector( s1, 10UL, 10UL ); // Creating a view on the addition of d2 and s2 dsv = subvector( d2 + s2, 5UL, 10UL ); // Creating a view on the multiplication of d2 and s2 ssv = subvector( d2 * s2, 2UL, 10UL ); \endcode // The \c subvector() function can be used on any dense or sparse vector, including expressions, // as demonstrated in the example. Note however that a blaze::Subvector can only be instantiated // with a dense or sparse vector primitive, i.e. with types that can be written, and not with an // expression type. // // // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. For instance, the current // number of elements can be obtained via the \c size() function, the current capacity via the // \c capacity() function, and the number of non-zero elements via the \c nonZeros() function. // However, since subvectors are references to a specific range of a vector, several operations // are not possible on views, such as resizing and swapping. The following example shows this by // means of a dense subvector view: \code typedef blaze::DynamicVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 42UL ); // ... 
Resizing and initialization // Creating a view on the range [5..15] of vector v SubvectorType sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector SubvectorType sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; VectorType v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode \code typedef blaze::CompressedVector<double,blaze::rowVector> VectorType; VectorType v; // ... Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. 
Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an Iterator, which allows a manipulation of the non-zero values, in case // of constant subvectors a ConstIterator is returned: \code typedef blaze::DynamicVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of the dense vector v SubvectorType sv = subvector( v, 16UL, 64UL ); for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense subvector value. } \endcode \code typedef blaze::CompressedVector<int,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of the sparse vector v SubvectorType sv = subvector( v, 16UL, 64UL ); for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedVector<double,blaze::rowVector> VectorType; VectorType v( 256UL ); // Non-initialized vector of size 256 typedef blaze::Subvector<VectorType> SubvectorType; SubvectorType sv( subvector( v, 10UL, 60UL ) ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the vector it is inserted into the vector, if it is already contained // in the vector its value is modified. sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. 
sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; DenseVectorType d1, d2, d3; SparseVectorType s1, s2; // ... Resizing and initialization typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; typedef blaze::Subvector<DenseVectorType> SubvectorType; SubvectorType dsv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 dsv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = dsv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = dsv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= dsv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n 
\section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType; DenseVectorType x; // ... Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] blaze::Subvector<DenseVectorType> sv1 = subvector ( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType> sv2 = subvector<unaligned>( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType,unaligned> sv3 = subvector ( x, 8UL, 16UL ); blaze::Subvector<DenseVectorType,unaligned> sv4 = subvector<unaligned>( x, 8UL, 16UL ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. 
Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. Aligned subvectors are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned dense subvector in the range [8..23] blaze::Subvector<DenseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; using blaze::columnVector; typedef blaze::DynamicVector<double,columnVector> VectorType; typedef blaze::Subvector<VectorType,aligned> SubvectorType; VectorType d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned SubvectorType dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned SubvectorType dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element SubvectorType dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned SubvectorType dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. 
// Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType; SparseVectorType x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] blaze::Subvector<SparseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL ); \endcode // \n \section views_subvectors_on_subvectors Subvectors on Subvectors // <hr> // // It is also possible to create a subvector view on another subvector. In this context it is // important to remember that the type returned by the \c subvector() function is the same type // as the type of the given subvector, not a nested subvector type, since the view on a subvector // is just another view on the underlying vector: \code typedef blaze::DynamicVector<double,blaze::rowVector> VectorType; typedef blaze::Subvector<VectorType> SubvectorType; VectorType d1; // ... Resizing and initialization // Creating a subvector view on the dense vector d1 SubvectorType sv1 = subvector( d1, 5UL, 10UL ); // Creating a subvector view on the dense subvector sv1 SubvectorType sv2 = subvector( sv1, 1UL, 5UL ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_submatrices */ //************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. 
This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_class The Submatrix Class Template // <hr> // // The blaze::Submatrix class template represents a view on a specific submatrix of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The type of the matrix is specified via two template parameters: \code template< typename MT, bool AF > class Submatrix; \endcode // - \c MT: specifies the type of the matrix primitive. Submatrix can be used with every matrix // primitive, but does not work with any matrix expression type. // - \c AF: the alignment flag specifies whether the submatrix is aligned (blaze::aligned) or // unaligned (blaze::unaligned). The default value is blaze::unaligned. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a submatrix can be created very conveniently via the \c submatrix() function. // This view can be treated as any other matrix, i.e. it can be assigned to, it can be copied // from, and it can be used in arithmetic operations. A submatrix created from a row-major // matrix will itself be a row-major matrix, a submatrix created from a column-major matrix // will be a column-major matrix. 
The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,blaze::columnMajor> SparseMatrixType; DenseMatrixType D1, D2; SparseMatrixType S1, S2; // ... Resizing and initialization // Creating a view on the first 8x16 block of the dense matrix D1 blaze::Submatrix<DenseMatrixType> dsm = submatrix( D1, 0UL, 0UL, 8UL, 16UL ); // Creating a view on the second 8x16 block of the sparse matrix S1 blaze::Submatrix<SparseMatrixType> ssm = submatrix( S1, 0UL, 16UL, 8UL, 16UL ); // Creating a view on the addition of D2 and S2 dsm = submatrix( D2 + S2, 5UL, 10UL, 8UL, 16UL ); // Creating a view on the multiplication of D2 and S2 ssm = submatrix( D2 * S2, 7UL, 13UL, 8UL, 16UL ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // The current size of the matrix, i.e. the number of rows or columns can be obtained via the // \c rows() and \c columns() functions, the current total capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since submatrices // are views on a specific submatrix of a matrix, several operations are not possible on views, // such as resizing and swapping: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A; // ... 
Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A SubmatrixType sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix SubmatrixType sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating a 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A; // ... Resizing and initialization // Creating a 8x8 submatrix, starting from position (4,4) blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. 
Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an Iterator, // which allows a manipulation of the non-zero values, in case of constant submatrices a // ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the dense matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code typedef blaze::CompressedMatrix<int,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of the sparse matrix A SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. 
} // Traversing the elements of the 1st row via iterators to const elements for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 typedef blaze::Submatrix<MatrixType> SubmatrixType; SubmatrixType sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in case of sparse matrices, elements can also be inserted via the append() function. 
// In case of submatrices, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index in the according row or column of the // submatrix and that the according row's or column's capacity is large enough to hold the new // element. Note however that due to the nature of a submatrix, which may be an alias to the // middle of a sparse matrix, the append() function does not work as efficiently for a // submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; DenseMatrixType D1, D2, D3; SparseMatrixType S1, S2; typedef blaze::CompressedVector<double,blaze::columnVector> SparseVectorType; SparseVectorType a, b; // ... 
Resizing and initialization typedef blaze::Submatrix<DenseMatrixType> SubmatrixType; SubmatrixType sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the blaze::unaligned flag: \code using blaze::unaligned; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType; DenseMatrixType A; // ... 
Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType> sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType> sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm3 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); blaze::Submatrix<DenseMatrixType,unaligned> sm4 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix. Aligned submatrices are created by // explicitly specifying the blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<DenseMatrixType,aligned> sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). 
In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; using blaze::rowMajor; typedef blaze::DynamicMatrix<double,rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType,aligned> SubmatrixType; MatrixType D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) SubmatrixType dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column SubmatrixType dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned SubmatrixType dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType; SparseMatrixType A; // ... 
Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 blaze::Submatrix<SparseMatrixType,aligned> sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_submatrices Submatrices on Submatrices // <hr> // // It is also possible to create a submatrix view on another submatrix. In this context it is // important to remember that the type returned by the \c submatrix() function is the same type // as the type of the given submatrix, since the view on a submatrix is just another view on the // underlying matrix: \code typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType; typedef blaze::Submatrix<MatrixType> SubmatrixType; MatrixType D1; // ... Resizing and initialization // Creating a submatrix view on the dense matrix D1 SubmatrixType sm1 = submatrix( D1, 4UL, 4UL, 8UL, 16UL ); // Creating a submatrix view on the dense submatrix sm1 SubmatrixType sm2 = submatrix( sm1, 1UL, 1UL, 4UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::Submatrix; typedef SymmetricMatrix< DynamicMatrix<int> > SymmetricDynamicType; typedef Submatrix< SymmetricDynamicType > SubmatrixType; // Setup of a 16x16 symmetric matrix SymmetricDynamicType A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 SubmatrixType sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! 
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. 
// // // \n \section views_rows_class The Row Class Template // <hr> // // The blaze::Row class template represents a reference to a specific row of a dense or sparse // matrix primitive. It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Row; \endcode // \c MT specifies the type of the matrix primitive. Row can be used with every matrix primitive, // but does not work with any matrix expression type. // // // \n \section views_rows_setup Setup of Rows // <hr> // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // This reference can be treated as any other row vector, i.e. it can be assigned to, it can be // copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. // The following two examples demonstrate this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,rowVector> DenseVectorType; typedef blaze::CompressedVector<double,rowVector> SparseVectorType; typedef blaze::DynamicMatrix<double,rowMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,rowMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... 
Resizing and initialization // Setting the 2nd row of matrix A to x blaze::Row<DenseMatrixType> row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // The \c row() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, rows cannot be instantiated for expression // types, but only for matrix primitives, respectively, i.e. for matrix types that offer write // access. // // // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. For instance, the current number of elements // can be obtained via the \c size() function, the current capacity via the \c capacity() function, // and the number of non-zero elements via the \c nonZeros() function. However, since rows are // references to specific rows of a matrix, several operations are not possible on views, such // as resizing and swapping. The following example shows this by means of a dense row view: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a reference to the 2nd row of matrix A RowType row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix RowType row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of the row can be directly accessed with the subscript operator. The numbering // of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of // a row can be traversed via iterators. Just as with vectors, in case of non-const rows, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // value, in case of a constant row a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK; Write access to the dense row value ... = *it; // OK: Read access to the dense row value. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 128UL, 256UL ); // ... 
Resizing and initialization // Creating a reference to the 31st row of matrix A RowType row31 = row( A, 31UL ); for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType; MatrixType A( 10UL, 100UL ); // Non-initialized 10x100 matrix typedef blaze::Row<MatrixType> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. 
However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element. row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrix; DenseMatrix A( 4UL, 2UL ); // Non-initialized 4x2 matrix typedef blaze::Row<DenseMatrix> RowType; RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition 
assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Row<MatrixType> RowType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 31st row of a column-major matrix A RowType row1 = row( A, 1UL ); for( RowType::Iterator it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices CompressedMatrix<double,columnMajor> A( 128UL, 128UL ); CompressedMatrix<double,columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... CompressedVector<double,rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. 
CompressedVector<double,rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible, // using a row-major storage order for matrix A would result in a more efficient evaluation. // // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. As such, columns act as a reference to a specific column. // This reference is valid and can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_columns_class The Column Class Template // <hr> // // The blaze::Column class template represents a reference to a specific column of a dense or // sparse matrix primitive. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The type of the matrix is specified via template parameter: \code template< typename MT > class Column; \endcode // \c MT specifies the type of the matrix primitive. Column can be used with every matrix // primitive, but does not work with any matrix expression type. // // // \n \section views_colums_setup Setup of Columns // <hr> // // Similar to the setup of a row, a reference to a dense or sparse column can be created very // conveniently via the \c column() function. 
This reference can be treated as any other column // vector, i.e. it can be assigned to, copied from, and be used in arithmetic operations. The // column can either be used as an alias to grant write access to a specific column of a matrix // primitive on the left-hand side of an assignment or to grant read-access to a specific column // of a matrix primitive or expression on the right-hand side of an assignment. The following // two examples demonstrate this for dense and sparse matrices: \code typedef blaze::DynamicVector<double,columnVector> DenseVectorType; typedef blaze::CompressedVector<double,columnVector> SparseVectorType; typedef blaze::DynamicMatrix<double,columnMajor> DenseMatrixType; typedef blaze::CompressedMatrix<double,columnMajor> SparseMatrixType; DenseVectorType x; SparseVectorType y; DenseMatrixType A, B; SparseMatrixType C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x blaze::Column<DenseMatrixType> col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // The \c column() function can be used on any dense or sparse matrix, including expressions, as // illustrated by the source code example. However, columns cannot be instantiated for expression // types, but only for matrix primitives, respectively, i.e. for matrix types that offer write // access. // // // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. For instance, the current number of // elements can be obtained via the \c size() function, the current capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. 
However, since // columns are references to specific columns of a matrix, several operations are not possible on // views, such as resizing and swapping. The following example shows this by means of a dense // column view: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd column of matrix A ColumnType col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix ColumnType col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of the column can be directly accessed with the subscript operator. The numbering // of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. Alternatively, the elements of // a column can be traversed via iterators. Just as with vectors, in case of non-const columns, // \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero // value, in case of a constant column a ConstIterator is returned: \code typedef blaze::DynamicMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK; Write access to the dense column value ... = *it; // OK: Read access to the dense column value. 
} for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code typedef blaze::CompressedMatrix<int,columnMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A ColumnType col31 = column( A, 31UL ); for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code typedef blaze::CompressedMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 100UL, 10UL ); // Non-initialized 100x10 matrix typedef blaze::Column<MatrixType> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. 
In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; typedef blaze::DynamicMatrix<double,blaze::columnMajor> MatrixType; MatrixType A( 2UL, 4UL ); // Non-initialized 2x4 matrix typedef blaze::Column<MatrixType> ColumnType; ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. 
For // instance: \code typedef blaze::CompressedMatrix<int,rowMajor> MatrixType; typedef blaze::Column<MatrixType> ColumnType; MatrixType A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st column of a row-major matrix A ColumnType col1 = column( A, 1UL ); for( ColumnType::Iterator it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a view on a matrix with // a fitting storage orientation. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices CompressedMatrix<double,rowMajor> A( 128UL, 128UL ); CompressedMatrix<double,rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... CompressedVector<double,columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th column of the row-major matrix B with A. CompressedVector<double,columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible, // using a column-major storage order for matrix B would result in a more efficient evaluation. 
// // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // The addition of vectors and matrices is as intuitive as the addition of scalar values. For both // the vector addition as well as the matrix addition the addition operator can be used. It even // enables the addition of dense and sparse vectors as well as the addition of dense and sparse // matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 + v2; // Addition of a two column vectors of different data type \endcode \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... 
Initializing the matrices M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // add vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // In case of matrices, however, it is possible to add row-major and column-major matrices. Note // however that in favor of performance the addition of two matrices with the same storage order // is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction */ //************************************************************************************************* //**Subtraction************************************************************************************ /*!\page subtraction Subtraction // // The subtraction of vectors and matrices works exactly as intuitive as the addition, but with // the subtraction operator. For both the vector subtraction as well as the matrix subtraction // the subtraction operator can be used. 
It also enables the subtraction of dense and sparse // vectors as well as the subtraction of dense and sparse matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 - v2; // Subtraction of a two column vectors of different data type blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // subtract vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors \endcode // In case of matrices, however, it is possible to subtract row-major and column-major matrices. // Note however that in favor of performance the subtraction of two matrices with the same storage // order is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 - v2; // Vectorized subtraction of two double precision vectors blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices \endcode // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication************************************************************************** /*!\page scalar_multiplication Scalar Multiplication // // The scalar multiplication is the multiplication of a scalar value with a vector or a matrix. // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Additionally, it is possible to use std::complex values with the same built-in data // types as element type. \code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; blaze::DynamicVector<double> v2 = v1 * 1.2; blaze::CompressedVector<float> v3 = -0.3F * v1; \endcode \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; blaze::DynamicMatrix<double> M2 = M1 * 1.2; blaze::CompressedMatrix<float> M3 = -0.3F * M1; \endcode // Vectors and matrices cannot be used as scalar values for scalar multiplications (see the // following example). 
However, each vector and matrix provides the \c scale() function, which // can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. 
\endcode // \n \section inner_product Inner Product / Scalar Product / Dot Product // <hr> // // The multiplication between a row vector and a column vector results in an inner product between // the two vectors: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; int result = v1 * v2; // Results in the value 15 \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = v1 * trans( v2 ); // Also results in the value 15 \endcode // Alternatively, either the \c inner() function, the \c dot() function or the comma operator can // be used for any combination of vectors (row or column vectors) to perform an inner product: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; // All alternatives for the inner product between a column vector and a row vector int result1 = trans( v1 ) * trans( v2 ); int result2 = inner( v1, v2 ); int result3 = dot( v1, v2 ); int result4 = (v1,v2); \endcode // When using the comma operator, please note the brackets embracing the inner product expression. // Due to the low precedence of the comma operator (lower even than the assignment operator) these // brackets are strictly required for a correct evaluation of the inner product. 
// // // \n \section outer_product Outer Product // <hr> // // The multiplication between a column vector and a row vector results in the outer product of // the two vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = v1 * v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2; \endcode // Alternatively, the \c outer() function can be used for any combination of vectors (row or column // vectors) to perform an outer product: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors \endcode // \n \section cross_product Cross Product // <hr> // // Two vectors with the same transpose flag can be multiplied via the cross product. The cross // product between two vectors \f$ a \f$ and \f$ b \f$ is defined as \f[ \left(\begin{array}{*{1}{c}} c_0 \\ c_1 \\ c_2 \\ \end{array}\right) = \left(\begin{array}{*{1}{c}} a_1 b_2 - a_2 b_1 \\ a_2 b_0 - a_0 b_2 \\ a_0 b_1 - a_1 b_0 \\ \end{array}\right). \f] // Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is // realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%) // can be used in case infix notation is required: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode // Please note that the cross product is restricted to three dimensional (dense and sparse) // column vectors. 
// // \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector/Vector Division************************************************************************* /*!\page vector_vector_division Vector/Vector Division // // \n \section componentwise_division Componentwise Division // <hr> // // Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector // or blaze::rowVector) via the division operator results in a componentwise division: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a // dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row // vectors. The result is a dense row vector. \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition! // // \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication */ //************************************************************************************************* //**Matrix/Vector Multiplication******************************************************************* /*!\page matrix_vector_multiplication Matrix/Vector Multiplication // // In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical // textbooks. 
Just as in textbooks there are two different multiplications between a matrix and // a vector: a matrix/column vector multiplication and a row vector/matrix multiplication: \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::DynamicMatrix; DynamicMatrix<int> M1( 39UL, 12UL ); StaticVector<int,12UL,columnVector> v1; // ... Initialization of the matrix and the vector DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication \endcode // Note that the storage order of the matrix poses no restrictions on the operation. Also note, // that the highest performance for a multiplication between a dense matrix and a dense vector can // be achieved if both the matrix and the vector have the same scalar element type. // // \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix/Matrix Multiplication******************************************************************* /*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // \n \section schur_product Componentwise Multiplication / Schur Product // <hr> // // Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns) // via the modulo operator results in a componentwise multiplication (Schur product) of the two // matrices: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 28UL, 35UL ); // ... 
Initialization of the matrices DynamicMatrix<double> M3 = M1 % M2; \endcode // \n \section matrix_product Matrix Product // <hr> // // The matrix/matrix product can be formulated exactly as in mathematical textbooks: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, 37UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 * M2; \endcode // The storage order of the two matrices poses no restrictions on the operation, all variations // are possible. It is also possible to multiply two matrices with different element types, as // long as the element types themselves can be multiplied and added. Note however that the // highest performance for a multiplication between two matrices can be expected for two // matrices with the same scalar element type. // // In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper // triangular, or diagonal, the computation can be optimized by explicitly declaring the // multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by // means of the \ref matrix_operations_declaration_operations : \code using blaze::DynamicMatrix; DynamicMatrix<double> M1, M2, M3; // ... Initialization of the square matrices M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal \endcode // Using a declaration operation on a multiplication expression can speed up the computation // by a factor of 2. 
Note however that the caller of the according declaration operation takes // full responsibility for the correctness of the declaration. Falsely declaring a multiplication // as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined // behavior! // // \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref custom_operations */ //************************************************************************************************* //**Custom Operations****************************************************************************** /*!\page custom_operations Custom Operations // // In addition to the provided operations on vectors and matrices it is possible to define custom // operations. For this purpose, \b Blaze provides the \c map() function, which allows to pass // the required operation via functor or lambda: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ){ return std::sqrt( d ); } ); \endcode // This example demonstrates the most convenient way of defining a unary custom operation by // passing a lambda to the \c map() function. The lambda is executed on each single element of // a dense vector or matrix or each non-zero element of a sparse vector or matrix. // // Alternatively, it is possible to pass a custom functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = map( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c map() it must define a function call operator, // which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. 
Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::cint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::cuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::cint16: Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::cuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::cint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::cuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::cint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::cuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::cfloat: Packed SIMD type for complex single precision floating point data</li> // <li>\c 
blaze::cdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element. In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } simd_double_t load( simd_double_t a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. // // Please note that this example has two drawbacks: First, it will only compile in case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the // availability of AVX is not taken into account. The first drawback can be alleviated by making // the \c load() function a function template. The second drawback can be dealt with by adding a // \c simdEnabled() function template to the functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } template< typename T > T load( T a ) const { return _mm256_sqrt_pd( a.value ); } template< typename T > static constexpr bool simdEnabled() { #if defined(__AVX__) return true; #else return false; #endif } }; \endcode // The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether // or not vectorization is available for the given data type \c T. In case the function returns // \c true, the \c load() function is used for a vectorized evaluation, in case the function // returns \c false, \c load() is not called. 
// // Note that this is a simplified example that is only working when used for dense vectors and // matrices with double precision floating point elements. The following code shows the complete // implementation of the according functor that is used within the \b Blaze library. The \b Blaze // \c Sqrt functor is working for all data types that are providing a square root operation: \code namespace blaze { struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const { return sqrt( a ); } template< typename T > static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; } template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T ); return sqrt( a ); } }; } // namespace blaze \endcode // The same approach can be taken for binary custom operations. The following code demonstrates // the \c Min functor of the \b Blaze library, which is working for all data types that provide // a \c min() operation: \code struct Min { explicit inline Min() {} template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const { return min( a, b ); } template< typename T1, typename T2 > static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; } template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 ); BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 ); return min( a, b ); } }; \endcode // For more information on the available \b Blaze SIMD data types and functions, please see the // SIMD module in the complete \b Blaze documentation. 
// // \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization****************************************************************** /*!\page shared_memory_parallelization Shared Memory Parallelization // // One of the main motivations of the \b Blaze 1.x releases was to achieve maximum performance // on a single CPU core for all possible operations. However, today's CPUs are not single core // anymore, but provide several (homogeneous or heterogeneous) compute cores. In order to fully // exploit the performance potential of a multicore CPU, computations have to be parallelized // across all available cores of a CPU. For this purpose, \b Blaze provides three different // shared memory parallelization techniques: // // - \ref openmp_parallelization // - \ref cpp_threads_parallelization // - \ref boost_threads_parallelization // // In addition, \b Blaze provides means to enforce the serial execution of specific operations: // // - \ref serial_execution // // \n Previous: \ref custom_operations &nbsp; &nbsp; Next: \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization************************************************************************* /*!\page openmp_parallelization OpenMP Parallelization // // \tableofcontents // // // \n \section openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify // the use of OpenMP on the command line: \code -fopenmp // GNU C++ compiler -openmp // Intel C++ compiler /openmp // Visual Studio \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of threads. 
// // As common for OpenMP, the number of threads can be specified either via an environment variable \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // Windows systems \endcode // or via an explicit call to the \c omp_set_num_threads() function: \code omp_set_num_threads( 4 ); \endcode // Alternatively, the number of threads can also be specified via the \c setNumThreads() function // provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of OpenMP, the function returns the maximum number of threads OpenMP will use // within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function. // // // \n \section openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze // deems the parallel execution as counterproductive for the overall performance, the operation // is executed serially. One of the main reasons for not executing an operation in parallel is // the size of the operands. For instance, a vector addition is only executed in parallel if the // size of both vector operands exceeds a certain threshold. Otherwise, the performance could // seriously decrease due to the overhead caused by the thread setup. However, in order to be // able to adjust the \b Blaze library to a specific system, it is possible to configure these // thresholds manually. 
All shared memory thresholds are contained within the configuration file // <tt>./blaze/config/Thresholds.h</tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique (see also \ref cpp_threads_parallelization and // \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum // performance for all possible situations and configurations. They merely provide a reasonable // standard for the current CPU generation. // // // \n \section openmp_first_touch First Touch Policy // <hr> // // So far the \b Blaze library does not (yet) automatically initialize dynamic memory according // to the first touch principle. Consider for instance the following vector triad example: \code using blaze::columnVector; const size_t N( 1000000UL ); blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // Performing a vector triad a = b + c * d; \endcode // If this code, which is prototypical for many OpenMP applications that have not been optimized // for ccNUMA architectures, is run across several locality domains (LD), it will not scale // beyond the maximum performance achievable on a single LD if the working set does not fit into // the cache. This is because the initialization loop is executed by a single thread, writing to // \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will // be mapped into a single LD. // // As mentioned above, this problem can be solved by performing vector initialization in parallel: \code // ... // Initialization of the vectors b, c, and d #pragma omp parallel for for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // ... 
\endcode // This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for // instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in // order to achieve the maximum possible performance, it is imperative to initialize the memory // according to the later use of the data structures. // // // \n \section openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // There are a few important limitations to the current \b Blaze OpenMP parallelization. The first // one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the // other one the OpenMP \c sections directive (see \ref openmp_sections). // // // \n \subsection openmp_parallel The Parallel Directive // // In OpenMP threads are explicitly spawned via an OpenMP parallel directive: \code // Serial region, executed by a single thread #pragma omp parallel { // Parallel region, executed by the specified number of threads } // Serial region, executed by a single thread \endcode // Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a // parallel directive is encountered. Therefore, from a performance point of view, it seems to be // beneficial to use a single OpenMP parallel directive for several operations: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode // Unfortunately, this optimization approach is not allowed within the \b Blaze library. More // explicitly, it is not allowed to put an operation into a parallel region. The reason is that // the entire code contained within a parallel region is executed by all threads. 
Although this // appears to just comprise the contained computations, a computation (or more specifically the // assignment of an expression to a vector or matrix) can contain additional logic that must not // be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.). // Therefore it is not possible to manually start a parallel region for several operations, but // \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand // and the given operands. // // \n \subsection openmp_sections The Sections Directive // // OpenMP provides several work-sharing constructs to distribute work among threads. One of these // constructs is the \c sections directive: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = A * x; #pragma omp section y2 = B * x; } \endcode // In this example, two threads are used to compute two distinct matrix/vector multiplications // concurrently. Thereby each of the \c sections is executed by exactly one thread. // // Unfortunately \b Blaze does not support concurrent parallel computations and therefore this // approach does not work with any of the \b Blaze parallelization techniques. All techniques // (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization // and \ref boost_threads_parallelization) are optimized for the parallel computation of an // operation within a single thread of execution. This means that \b Blaze tries to use all // available threads to compute the result of a single operation as efficiently as possible. // Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations // and to let \b Blaze compute all operations within a \c sections directive in serial. 
This can // be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution) // or by selectively serializing all operations within a \c sections directive via the \c serial() // function: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = serial( A * x ); #pragma omp section y2 = serial( B * x ); } \endcode // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does // NOT work in this context! // // \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization */ //************************************************************************************************* //**C++11 Thread Parallelization******************************************************************* /*!\page cpp_threads_parallelization C++11 Thread Parallelization // // \tableofcontents // // // In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a shared memory parallelization based on C++11 threads. // // // \n \section cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the C++11 thread-based parallelization, first the according C++11-specific // compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument // has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the // compiler flags have to be extended by \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11 // threads are enabled on the command line, the OpenMP-based parallelization has priority and // is preferred. 
// // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of C++11 threads, the function will return the previously specified number of // threads. // // // \n \section cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an // operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for // the overall performance, the operation is executed serially. One of the main reasons for not // executing an operation in parallel is the size of the operands. For instance, a vector addition // is only executed in parallel if the size of both vector operands exceeds a certain threshold. // Otherwise, the performance could seriously decrease due to the overhead caused by the thread // setup. However, in order to be able to adjust the \b Blaze library to a specific system, it // is possible to configure these thresholds manually. All thresholds are contained within the // configuration file <tt>./blaze/config/Thresholds.h</tt>. 
// // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaptation for // the C++11 thread parallelization. // // // \n \section cpp_threads_known_issues Known Issues // <hr> // // There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if their destructor is executed after the \c main() function: // // http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // Unfortunately, the C++11 parallelization of the \b Blaze library is affected by this bug. // In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function, // which can be used to manually destroy all threads at the end of the \c main() function: \code int main() { // ... Using the C++11 thread parallelization of Blaze shutDownThreads(); } \endcode // Please note that this function may only be used at the end of the \c main() function. After // this function no further computation may be executed! Also note that this function has an // effect for Visual Studio compilers only and doesn't need to be used with any other compiler. // // \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization******************************************************************* /*!\page boost_threads_parallelization Boost Thread Parallelization // // \tableofcontents // // // The third available shared memory parallelization provided with \b Blaze is based on Boost // threads. 
// // // \n \section boost_threads_setup Boost Thread Setup // <hr> // // In order to enable the Boost thread-based parallelization, two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode // Second, the according Boost libraries have to be linked. These two simple actions will cause // the \b Blaze library to automatically try to run all operations in parallel with the specified // number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations // have priority, i.e. are preferred in case either is enabled in combination with the Boost // thread parallelization. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of Boost threads, the function will return the previously specified number of // threads. // // // \n \section boost_threads_configuration Boost Thread Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization). 
// All thresholds related to the Boost thread parallelization are also contained within the // configuration file <tt>./blaze/config/Thresholds.h</tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaptation for // the Boost thread parallelization. // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution */ //************************************************************************************************* //**Serial Execution******************************************************************************* /*!\page serial_execution Serial Execution // // Sometimes it may be necessary to enforce the serial execution of specific operations. For this // purpose, the \b Blaze library offers three possible options: the serialization of a single // expression via the \c serial() function, the serialization of a block of expressions via the // \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution. // // // \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression // <hr> // // The first option is the serialization of a specific operation via the \c serial() function: \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and initialization C = serial( A + B ); \endcode // \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any // kind of dense or sparse vector or matrix expression. 
// // // \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions // <hr> // // The second option is the temporary and local enforcement of a serial execution via the // \c BLAZE_SERIAL_SECTION: \code using blaze::rowMajor; using blaze::columnVector; blaze::DynamicMatrix<double,rowMajor> A; blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; // ... Resizing and initialization // Parallel execution // If possible and beneficial for performance the following operation is executed in parallel. x = A * b; // Serial execution // All operations executed within the serial section are guaranteed to be executed in // serial (even if a parallel execution would be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * d; } // Parallel execution continued // ... \endcode // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial. // Outside the scope of the serial section, all operations are run in parallel (if beneficial for // the performance). // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution. // The use of the serial section within several concurrent threads will result in undefined behavior! // // // \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution // <hr> // // The third option is the general deactivation of the parallel execution (even in case OpenMP is // enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the <tt>./blaze/config/SMP.h</tt> configuration file: \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory // parallelization is deactivated altogether. 
// // \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serialization */ //************************************************************************************************* //**Serialization********************************************************************************** /*!\page serialization Serialization // // Sometimes it is necessary to store vectors and/or matrices on disk, for instance for storing // results or for sharing specific setups with other people. The \b Blaze math serialization // module provides the according functionality to create platform independent, portable, binary // representations of vectors and matrices that can be used to store the \b Blaze data structures // without loss of precision and to reliably transfer them from one machine to another. // // The following two pages explain how to serialize vectors and matrices: // // - \ref vector_serialization // - \ref matrix_serialization // // \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization*************************************************************************** /*!\page vector_serialization Vector Serialization // // The following example demonstrates the (de-)serialization of dense and sparse vectors: \code using blaze::columnVector; using blaze::rowVector; // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> d; blaze::CompressedVector<int,columnVector> s; // ... Resizing and initialization // Creating an archive that writes into the file "vectors.blaze" blaze::Archive<std::ofstream> archive( "vectors.blaze" ); // Serialization of both vectors into the same archive. Note that d lies before s! 
archive << d << s; } // Reconstitution of both vectors { blaze::DynamicVector<double,rowVector> d1; blaze::DynamicVector<int,rowVector> d2; // Creating an archive that reads from the file "vectors.blaze" blaze::Archive<std::ifstream> archive( "vectors.blaze" ); // Reconstituting the former d vector into d1. Note that it is possible to reconstitute // the vector into a different kind of vector (StaticVector -> DynamicVector), but that // the type of elements has to be the same. archive >> d1; // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute // a sparse vector as a dense vector (also the reverse is possible) and that a column vector // can be reconstituted as row vector (and vice versa). Note however that also in this case // the type of elements is the same! archive >> d2; } \endcode // The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can // also be used for vectors with vector or matrix element type: \code // Serialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // ... Resizing and initialization // Creating an archive that writes into the file "vector.blaze" blaze::Archive<std::ofstream> archive( "vector.blaze" ); // Serialization of the vector into the archive archive << vec; } // Deserialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // Creating an archive that reads from the file "vector.blaze" blaze::Archive<std::ifstream> archive( "vector.blaze" ); // Reconstitution of the vector from the archive archive >> vec; } \endcode // As the example demonstrates, the vector serialization offers an enormous flexibility. 
However, // several actions result in errors: // // - vectors cannot be reconstituted as matrices (and vice versa) // - the element type of the serialized and reconstituted vector must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticVector, its size must match the size of the serialized vector // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. // // \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization */ //************************************************************************************************* //**Matrix Serialization*************************************************************************** /*!\page matrix_serialization Matrix Serialization // // The serialization of matrices works in the same manner as the serialization of vectors. The // following example demonstrates the (de-)serialization of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; // Serialization of both matrices { blaze::StaticMatrix<double,3UL,5UL,rowMajor> D; blaze::CompressedMatrix<int,columnMajor> S; // ... Resizing and initialization // Creating an archive that writes into the file "matrices.blaze" blaze::Archive<std::ofstream> archive( "matrices.blaze" ); // Serialization of both matrices into the same archive. Note that D lies before S! archive << D << S; } // Reconstitution of both matrices { blaze::DynamicMatrix<double,rowMajor> D1; blaze::DynamicMatrix<int,rowMajor> D2; // Creating an archive that reads from the file "matrices.blaze" blaze::Archive<std::ifstream> archive( "matrices.blaze" ); // Reconstituting the former D matrix into D1. 
Note that it is possible to reconstitute // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that // the type of elements has to be the same. archive >> D1; // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also // in this case the type of elements is the same! archive >> D2; } \endcode // Note that also in case of matrices it is possible to (de-)serialize matrices with vector or // matrix elements: \code // Serialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // ... Resizing and initialization // Creating an archive that writes into the file "matrix.blaze" blaze::Archive<std::ofstream> archive( "matrix.blaze" ); // Serialization of the matrix into the archive archive << mat; } // Deserialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // Creating an archive that reads from the file "matrix.blaze" blaze::Archive<std::ifstream> archive( "matrix.blaze" ); // Reconstitution of the matrix from the archive archive >> mat; } \endcode // Note that just as the vector serialization, the matrix serialization is restricted by a // few important rules: // // - matrices cannot be reconstituted as vectors (and vice versa) // - the element type of the serialized and reconstituted matrix must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticMatrix, the number of rows and columns must match those // of the serialized matrix // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. 
// // \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions********************************************************************************* /*!\page blas_functions BLAS Functions // // \tableofcontents // // // For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices // \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements // several convenient C++ wrapper functions for several BLAS functions. The following sections // give a complete overview of all available BLAS level 1, 2 and 3 functions. // // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()): \code namespace blaze { float dotu( int n, const float* x, int incX, const float* y, int incY ); double dotu( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotu( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotu( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): \code namespace blaze { float dotc( int n, const float* x, int incX, const float* y, int incY ); double dotc( int n, const double* x, int 
incX, const double* y, int incY ); complex<float> dotc( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotc( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper functions provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()): \code namespace blaze { void axpy( int n, float alpha, const float* x, int incX, float* y, int incY ); void axpy( int n, double alpha, const double* x, int incX, double* y, int incY ); void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, complex<float>* y, int incY ); void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha, const float* A, int lda, const float* x, int incX, float beta, float* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha, const double* A, int lda, const double* x, int incX, double beta, double* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, 
complex<float> alpha, const complex<float>* A, int lda, const complex<float>* x, int incX, complex<float> beta, complex<float>* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* x, int incX, complex<double> beta, complex<double>* y, int incY ); template< typename VT1, typename MT1, bool SO, typename VT2, typename ST > void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A, const DenseVector<VT2,false>& x, ST alpha, ST beta ); template< typename VT1, typename VT2, typename MT1, bool SO, typename ST > void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x, const DenseMatrix<MT1,SO>& A, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(), // and \c ztrmv()): \code namespace blaze { void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const float* A, int lda, float* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const double* A, int lda, double* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); } // namespace blaze \endcode 
// \n \section blas_level_3 BLAS Level 3 // <hr> // // \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()): \code namespace blaze { void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, double alpha, const double* A, int lda, const double* B, int ldb, double beta, double* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST > void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and // \c ztrmm()): \code namespace blaze { void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, 
CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); 
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // \n \section lapack_introction Introduction // <hr> // // The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They // provide the parameters of the original LAPACK functions and thus provide maximum flexibility: \code constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... 
Initializing the matrix const int m ( numeric_cast<int>( A.rows() ) ); // == N const int n ( numeric_cast<int>( A.columns() ) ); // == N const int lda ( numeric_cast<int>( A.spacing() ) ); // >= N const int lwork( n*lda ); const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required const std::unique_ptr<double[]> work( new double[N] ); // No initialization required int info( 0 ); getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info' getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info' \endcode // Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These // wrappers provide a maximum of convenience: \code constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... Initializing the matrix const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required getrf( A, ipiv.get() ); // Cannot fail getri( A, ipiv.get() ); // Reports failure via exception \endcode // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if a fitting LAPACK library is available and linked to // the final executable. Otherwise a call to this function will result in a linker error. // // \note For performance reasons all functions do only provide the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. // // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix. Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. 
// // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f]\n // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations. 
// // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info ); void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zsytrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, int n, float* A, int lda, int* info ); void potrf( char uplo, int n, double* A, int lda, int* info ); void potrf( char uplo, int n, complex<float>* A, int lda, int* info ); void potrf( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \a std::invalid_argument exception is thrown. 
// // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const 
DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). 
Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition: \code namespace blaze { void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // an RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, 
int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. 
<tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition: \code namespace blaze { void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int 
ldc, double* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . 
H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // an LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void 
ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info ); void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info ); void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info ); void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... 
the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info ); void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info ); void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, int n, float* A, int lda, int* info ); void potri( char uplo, int n, double* A, int lda, int* info ); void potri( char uplo, int n, complex<float>* A, int lda, int* info ); void potri( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible. 
// // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. 
// Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. // // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv ); } // namespace 
blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... 
the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info ); void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and // \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according // \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b contains the // solution of the linear system of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions performs any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! 
// // // \n \section lapack_eigenvalues Eigenvalues/Eigenvectors // // \subsection lapack_eigenvalues_general General Matrices // // The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(), // \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of // the given general matrix: \code namespace blaze { void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* info ); void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int lwork, int* info ); void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int ldvr, complex<float>* work, int lwork, float* rwork, int* info ); void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, int ldvr, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR ); } // namespace blaze \endcode // The complex eigenvalues of the given matrix \a A are returned in the given vector \a w. 
// Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs // of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part // first. // // If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR // in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major // matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies \f[ A * v[j] = lambda[j] * v[j], \f] // where \f$lambda[j]\f$ is its eigenvalue. // // If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL // in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major // matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies \f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f] // where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$. // // \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The // functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // The first four functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices // // The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(), // which compute the eigenvalues and eigenvectors of the given symmetric matrix: \code namespace blaze { void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* info ); void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* iwork, int liwork, int* info ); void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* iwork, int liwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... 
the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix: \code namespace blaze { void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, int ldz, float* work, int lwork, int* iwork, int* ifail, int* info ); void syevx( char jobz, char range, char uplo, int n, double* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. 
The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices // // The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(), // which compute the eigenvalues and eigenvectors of the given Hermitian matrix: \code namespace blaze { void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int* info ); void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, int liwork, int* info ); void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int lrwork, int* iwork, int liwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... 
the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix: \code namespace blaze { void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, int* iwork, int* ifail, int* info ); void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* rwork, int* iwork, int* ifail, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point 
range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \section lapack_singular_values Singular Values/Singular Vectors // // The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(), // \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given // general matrix: \code namespace blaze { void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). 
In contrast to the \c gesvd() // functions they compute the singular value decomposition (SVD) of the given general matrix by // applying a divide-and-conquer strategy for the computation of the left and right singular // vectors: \code namespace blaze { void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz ); } // namespace blaze \endcode // The resulting decomposition has the form \f[ A = U \cdot S \cdot V, \f] // where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal // elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal // matrix. 
The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n) // columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively. // // The resulting min(\a m,\a n) real and non-negative singular values are returned in descending // order in the vector \a s, which is resized to the correct size (if possible and necessary). // // Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(), // \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or // vectors: \code namespace blaze { void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT1,SO>& A, 
DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The number of singular values to be computed is specified by the lower bound \a low and the // upper bound \a upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. 
// // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // The first four functions report failure via the \c info argument, the remaining functions throw // an exception in case of an error. // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref configuration_files \n */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. 
For a complete // overview of all customization opportunities, please go to the configuration files in the // <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation. // // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library. // The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector \endcode // Alternatively the default transpose flag can be specified via command line or by defining this // symbol manually before including any Blaze header file: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order // for all matrices of the \b Blaze library can be specified. 
\code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor \endcode // Alternatively the default storage order can be specified via command line or by defining this // symbol manually before including any Blaze header file: \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // Alternatively, both settings can be specified via command line or by defining the symbols // manually before including any Blaze header file: \code #define BLAZE_BLAS_MODE 1 #define BLAZE_BLAS_IS_PARALLEL 1 #include <blaze/Blaze.h> \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. 
However, for optimal // speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code #define BLAZE_CACHE_SIZE 3145728UL \endcode // The cache size can also be specified via command line or by defining this symbol manually // before including any Blaze header file: \code #define BLAZE_CACHE_SIZE 3145728UL #include <blaze/Blaze.h> \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // AVX-512 intrinsics, depending on which instruction set is available. However, it is possible // to disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any Blaze header file: \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! // // // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. 
// Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt>./blaze/config/Thresholds.h</tt>. // // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for all dense vectors and matrices in order to // achieve maximum performance in all operations. Due to padding, the proper alignment of data // elements can be guaranteed and the need for remainder loops is minimized. However, on the // downside padding introduces an additional memory overhead, which can be large depending on // the used data type. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_USE_PADDING 1; \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any Blaze header file: \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if // it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce // the performance of all dense vector and matrix operations! 
// // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code #define BLAZE_USE_STREAMING 1 \endcode // Alternatively streaming can be (de-)activated via command line or by defining this symbol // manually before including any Blaze header file: \code #define BLAZE_USE_STREAMING 1 #include <blaze/Blaze.h> \endcode // If \c useStreaming is set to 1 streaming is enabled, if it is set to 0 streaming is disabled. // It is recommended to consult the target architecture's white papers to decide whether streaming // is beneficial or hurtful for performance. // // // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. 
For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. 
// // // \n \section block_vectors_and_matrices_example Example // <hr> // // The following example demonstrates a complete multiplication between a statically sized block // matrix and block vector: \code // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) typedef StaticMatrix<int,2UL,2UL,rowMajor> M2x2; typedef StaticVector<int,2UL,columnVector> V2; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref custom_data_types \n */ //************************************************************************************************* //**Custom Data Types****************************************************************************** /*!\page custom_data_types Custom Data Types // // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. If any of these functions is missing it is necessary to implement // the operator to perform the according operation.
For this example we assume that the custom // data type provides the four following functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t b ); double_t sub ( const double_t& a, const double_t b ); double_t mult( const double_t& a, const double_t b ); double_t div ( const double_t& a, const double_t b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. In the rare case that the return type cannot be // automatically determined from the operator it might be additionally necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct SubTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct MultTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; template<> struct DivTrait<custom::double_t,custom::double_t> { typedef custom::double_t Type; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). 
Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that only built-in data types apply for vectorization and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref error_reporting_customization \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors. // // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. 
In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! // // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown.
This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library. Thus be advised to use the macro with due // care! // // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. 
// This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref custom_data_types &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... 
Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... 
Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref error_reporting_customization */ //************************************************************************************************* #endif
pr85696.c
/* PR c/85696 */
/* Compiler regression test (GCC PR c/85696).  Exercises OpenMP
   'default(none)' with variables whose types are variably modified
   (they depend on the VLA bound 'n') appearing in a 'shared' clause.
   NOTE(review): presumably the PR was a bogus error/ICE on exactly these
   forms -- confirm against the GCC bug report.  */

#ifndef __cplusplus
/* VLA parameter types are C-only, hence this variant is compiled out
   under C++ (see the guard above).  */
void foo (int n, int a[][n])
{
#pragma omp parallel shared(a) default(none)
#pragma omp master
  a[23][0] = 42;
}
#endif

/* Same shared-VLA access pattern, but the variably modified type appears
   on a block-scope pointer initialized from the opaque 'void *p'.  */
void bar (int n, void *p)
{
  int (*a)[n] = (int (*)[n]) p;
#pragma omp parallel shared(a) default(none)
#pragma omp master
  a[23][0] = 42;
}
bondarenko_2004.c
#include <stdlib.h>
#include "bondarenko_2004.h"

// Report static model metadata to the caller: the resting membrane
// potential and the size of the ODE system.  INITIAL_V and NEQ are
// provided by the model header.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Load the initial conditions of the Bondarenko 2004 murine ventricular
// myocyte model into the state vector sv (41 state variables; the unit of
// each entry is noted in its comment).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    sv[0] = -82.4202f;      // V millivolt
    sv[1] = 0.115001f;      // Cai micromolar
    sv[2] = 0.115001f;      // Cass micromolar
    sv[3] = 1299.5f;        // CaJSR micromolar
    sv[4] = 1299.5f;        // CaNSR micromolar
    sv[5] = 0.0f;           // P_RyR dimensionless
    sv[6] = 11.2684f;       // LTRPN_Ca micromolar
    sv[7] = 125.29f;        // HTRPN_Ca micromolar
    sv[8] = 0.149102e-4f;   // P_O1 dimensionless
    sv[9] = 0.951726e-10f;  // P_O2 dimensionless
    sv[10] = 0.16774e-3f;   // P_C2 dimensionless
    sv[11] = 0.930308e-18f; // O dimensionless
    sv[12] = 0.124216e-3f;  // C2 dimensionless
    sv[13] = 0.578679e-8f;  // C3 dimensionless
    sv[14] = 0.119816e-12f; // C4 dimensionless
    sv[15] = 0.497923e-18f; // I1 dimensionless
    sv[16] = 0.345847e-13f; // I2 dimensionless
    sv[17] = 0.185106e-13f; // I3 dimensionless
    sv[18] = 14237.1f;      // Nai micromolar
    sv[19] = 0.020752f;     // C_Na2 dimensionless
    sv[20] = 0.279132e-3f;  // C_Na1 dimensionless
    sv[21] = 0.713483e-6f;  // O_Na dimensionless
    sv[22] = 0.153176e-3f;  // IF_Na dimensionless
    sv[23] = 0.673345e-6f;  // I1_Na dimensionless
    sv[24] = 0.155787e-8f;  // I2_Na dimensionless
    sv[25] = 0.0113879f;    // IC_Na2 dimensionless (read back as IC_Na2_old_ in RHS_cpu)
    sv[26] = 0.34278f;      // IC_Na3 dimensionless (read back as IC_Na3_old_ in RHS_cpu)
    sv[27] = 143720.0f;     // Ki micromolar
    sv[28] = 0.265563e-2f;  // ato_f dimensionless
    sv[29] = 0.999977f;     // ito_f dimensionless
    sv[30] = 0.417069e-3f;  // ato_s dimensionless
    sv[31] = 0.998543f;     // ito_s dimensionless
    sv[32] = 0.262753e-3f;  // nKs dimensionless
    sv[33] = 0.417069e-3f;  // aur dimensionless
    sv[34] = 0.998543f;     // iur dimensionless
    sv[35] = 0.417069e-3f;  // aKss dimensionless
    sv[36] = 1.0f;          // iKss dimensionless
    sv[37] = 0.641229e-3f;  // C_K2 dimensionless
    sv[38] = 0.992513e-3f;  // C_K1 dimensionless
    sv[39] = 0.175298e-3f;  // O_K dimensionless
    sv[40] = 0.319129e-4f;  // I_K dimensionless
}

// Advance num_cells_to_solve cells by num_steps explicit steps of size dt.
// Cells are independent, so the outer loop is parallelized with OpenMP;
// sv_id maps the loop index to the global cell id when only a subset of
// cells is being solved.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

#pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i]; // explicit subset of cells
        else
            sv_id = i;                 // all cells, in order

        for (int j = 0; j < num_steps; ++j) {
            // each cell owns NEQ consecutive entries of sv
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One forward-Euler step for a single cell: sv <- sv + dt * f(sv),
// where f is evaluated by RHS_cpu.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current);

    for(int i = 0; i < NEQ; i++)
        sv[i] = dt*rDY[i] + rY[i];
}

// Right-hand side f(sv) of the Bondarenko 2004 ODE system: reads the 41
// state variables, evaluates the ionic currents and calcium fluxes, and
// writes the time derivatives into rDY_.
void RHS_cpu(const real *sv, real *rDY_, real stim_current) {

    // State variables
    const real V_old_ = sv[0];        // initial value = -82.4202 millivolt
    const real Cai_old_ = sv[1];      // initial value = 0.115001 micromolar
    const real Cass_old_ = sv[2];     // initial value = 0.115001 micromolar
    const real CaJSR_old_ = sv[3];    // initial value = 1299.5 micromolar
    const real CaNSR_old_ = sv[4];    // initial value = 1299.5 micromolar
    const real P_RyR_old_ = sv[5];    // initial value = 0 dimensionless
    const real LTRPN_Ca_old_ = sv[6]; // initial value = 11.2684 micromolar
    const real HTRPN_Ca_old_ = sv[7]; // initial value = 125.29 micromolar
    const real P_O1_old_ = sv[8];     // initial value = 0.149102e-4 dimensionless
    const real P_O2_old_ = sv[9];     // initial value = 0.951726e-10 dimensionless
    const real P_C2_old_ = sv[10];    // initial value = 0.16774e-3 dimensionless
    const real O_old_ = sv[11];       // initial value = 0.930308e-18 dimensionless
    const real C2_old_ = sv[12];      // initial value = 0.124216e-3 dimensionless
    const real C3_old_ = sv[13];      // initial value = 0.578679e-8 dimensionless
    const real C4_old_ = sv[14];      // initial value = 0.119816e-12 dimensionless
    const real I1_old_ = sv[15];      // initial value = 0.497923e-18 dimensionless
    const real I2_old_ = sv[16];      // initial value = 0.345847e-13 dimensionless
    const real I3_old_ = sv[17];      // initial value = 0.185106e-13 dimensionless
    const real Nai_old_ = sv[18];     // initial value = 14237.1 micromolar
    const real C_Na2_old_ = sv[19];   // initial value = 0.020752 dimensionless
    const real C_Na1_old_ = sv[20];   // initial value = 0.279132e-3 dimensionless
    const real O_Na_old_ = sv[21];    // initial value = 0.713483e-6 dimensionless
    const real IF_Na_old_ = sv[22];   // initial value = 0.153176e-3 dimensionless
    const real I1_Na_old_ = sv[23];   // initial value = 0.673345e-6 dimensionless
    const real I2_Na_old_ = sv[24];   // initial value = 0.155787e-8 dimensionless
    const real IC_Na2_old_ = sv[25];  // initial value = 0.0113879 dimensionless
    const real IC_Na3_old_ = sv[26];  // initial value = 0.34278 dimensionless
    const real Ki_old_ = sv[27];      // initial value = 143720 micromolar
    const real ato_f_old_ = sv[28];   // initial value = 0.265563e-2 dimensionless
    const real ito_f_old_ = sv[29];   // initial value = 0.999977 dimensionless
    const real ato_s_old_ = sv[30];   // initial value = 0.417069e-3 dimensionless
    const real ito_s_old_ = sv[31];   // initial value = 0.998543 dimensionless
    const real nKs_old_ = sv[32];     // initial value = 0.262753e-3 dimensionless
    const real aur_old_ = sv[33];     // initial value = 0.417069e-3 dimensionless
    const real iur_old_ = sv[34];     // initial value = 0.998543 dimensionless
    const real aKss_old_ = sv[35];    // initial value = 0.417069e-3 dimensionless
    const real iKss_old_ = sv[36];    // initial value = 1 dimensionless
    const real C_K2_old_ = sv[37];    // initial value = 0.641229e-3 dimensionless
    const real C_K1_old_ = sv[38];    // initial value = 0.992513e-3 dimensionless
    const real O_K_old_ = sv[39];     // initial value = 0.175298e-3 dimensionless
    const real I_K_old_ = sv[40];     // initial value = 0.319129e-4 dimensionless

    // Parameters (model constants; units noted per entry)
    const real Acap = 1.534e-4f;        // cm2
    const real Cm = 1.0f;               // microF_per_cm2
    const real Vmyo = 25.84e-6f;        // microlitre
    const real F = 96.5f;               // coulomb_per_millimole
    const real VJSR = 0.12e-6f;         // microlitre
    const real Vss = 1.485e-9f;         // microlitre
    const real VNSR = 2.098e-6f;        // microlitre
    const real CMDN_tot = 50.0f;        // micromolar
    const real Km_CMDN = 0.238f;        // micromolar
    const real CSQN_tot = 15000.0f;     // micromolar
    const real Km_CSQN = 800.0f;        // micromolar
    const real v1 = 4.5f;               // per_millisecond
    const real tau_tr = 20.0f;          // millisecond
    const real tau_xfer = 8.0f;         // millisecond
    const real v2 = 1.74e-5f;           // per_millisecond
    const real v3 = 0.45f;              // micromolar_per_millisecond
    const real Km_up = 0.5f;            // micromolar
    const real k_plus_htrpn = 0.00237f; // per_micromolar_millisecond
    const real HTRPN_tot = 140.0f;      // micromolar
    const real k_plus_ltrpn = 0.0327f;  // per_micromolar_millisecond
    const real LTRPN_tot = 70.0f;       // micromolar
    const real k_minus_htrpn = 3.2e-5f; // per_millisecond
    const real k_minus_ltrpn = 0.0196f; // per_millisecond
    const real i_CaL_max = 7.0f;        // picoA_per_picoF
    const real k_plus_a = 0.006075f;    // micromolar4_per_millisecond
    const real n = 4.0f;                // dimensionless
    const real k_minus_b = 0.965f;      // per_millisecond
    const real k_minus_c = 0.0008f;     // per_millisecond
    const real k_minus_a = 0.07125f;    // per_millisecond
    const real k_plus_b = 0.00405f;     // micromolar3_per_millisecond
    const real m = 3.0f;                // dimensionless
    const real k_plus_c = 0.009f;       // per_millisecond
    const real g_CaL = 0.1729f;         // milliS_per_microF
    const real E_CaL = 63.0f;           // millivolt
    const real Kpcb = 0.0005f;          // per_millisecond
    const real Kpc_max = 0.23324f;      // per_millisecond
    const real Kpc_half = 20.0f;        // micromolar
    const real i_pCa_max = 1.0f;        // picoA_per_picoF
    const real Km_pCa = 0.5f;           // micromolar
    const real k_NaCa = 292.8f;         // picoA_per_picoF
    const real K_mNa = 87500.0f;        // micromolar
    const real Nao = 140000.0f;         // micromolar
    const real K_mCa = 1380.0f;         // micromolar
    const real Cao = 1800.0f;           // micromolar
    const real k_sat = 0.1f;            // dimensionless
    const real eta = 0.35f;             // dimensionless
    const real R = 8.314f;              // joule_per_mole_kelvin
    const real T = 298.0f;              // kelvin
    const real g_Cab = 0.000367f;       // milliS_per_microF
    const real g_Na = 13.0f;            // milliS_per_microF
    const real Ko =
5400.0f; // micromolar const real g_Nab = 0.0026f; // milliS_per_microF const real g_Kto_f = 0.4067f; // milliS_per_microF const real g_Kto_s = 0.0f; // milliS_per_microF const real g_Ks = 0.00575f; // milliS_per_microF const real g_Kur = 0.16f; // milliS_per_microF const real g_Kss = 0.05f; // milliS_per_microF const real g_Kr = 0.078f; // milliS_per_microF const real kf = 0.023761f; // per_millisecond const real kb = 0.036778f; // per_millisecond const real i_NaK_max = 0.88f; // picoA_per_picoF const real Km_Nai = 21000.0f; // micromolar const real Km_Ko = 1500.0f; // micromolar const real g_ClCa = 10.0f; // milliS_per_microF const real Km_Cl = 10.0f; // micromolar const real E_Cl = -40.0f; // millivolt // Algebraic Equations real calc_i_stim = stim_current; //0 real calc_Bi = powf((1.0f+((CMDN_tot*Km_CMDN)/powf((Km_CMDN+Cai_old_),2.0f))),(-1.0f)); //6 real calc_Bss = powf((1.0f+((CMDN_tot*Km_CMDN)/powf((Km_CMDN+Cass_old_),2.0f))),(-1.0f)); //7 real calc_BJSR = powf((1.0f+((CSQN_tot*Km_CSQN)/powf((Km_CSQN+CaJSR_old_),2.0f))),(-1.0f)); //8 real calc_J_rel = (v1*(P_O1_old_+P_O2_old_)*(CaJSR_old_-Cass_old_)*P_RyR_old_); //9 real calc_J_tr = ((CaNSR_old_-CaJSR_old_)/tau_tr); //10 real calc_J_xfer = ((Cass_old_-Cai_old_)/tau_xfer); //11 real calc_J_leak = (v2*(CaNSR_old_-Cai_old_)); //12 real calc_J_up = ((v3*powf(Cai_old_,2.0f))/(powf(Km_up,2.0f)+powf(Cai_old_,2.0f))); //13 real calc_J_trpn = (((k_plus_htrpn*Cai_old_*(HTRPN_tot-HTRPN_Ca_old_))+(k_plus_ltrpn*Cai_old_*(LTRPN_tot-LTRPN_Ca_old_)))-((k_minus_htrpn*HTRPN_Ca_old_)+(k_minus_ltrpn*LTRPN_Ca_old_))); //14 real calc_P_C1 = (1.0f-(P_C2_old_+P_O1_old_+P_O2_old_)); //19 real calc_i_CaL = (g_CaL*O_old_*(V_old_-E_CaL)); //22 real calc_C1 = (1.0f-(O_old_+C2_old_+C3_old_+C4_old_+I1_old_+I2_old_+I3_old_)); //24 real calc_alpha = ((0.4f*expf(((V_old_+12.0f)/10.0f))*((1.0f+(0.7f*expf(((-powf((V_old_+40.0f),2.0f))/10.0f))))-(0.75f*expf(((-powf((V_old_+20.0f),2.0f))/400.0f)))))/(1.0f+(0.12f*expf(((V_old_+12.0f)/10.0f))))); 
//31 real calc_beta = (0.05f*expf(((-(V_old_+12.0f))/13.0f))); //32 real calc_gamma = ((Kpc_max*Cass_old_)/(Kpc_half+Cass_old_)); //33 real calc_Kpcf = (13.0f*(1.0f-expf(((-powf((V_old_+14.5f),2.0f))/100.0f)))); //34 real calc_i_pCa = ((i_pCa_max*powf(Cai_old_,2.0f))/(powf(Km_pCa,2.0f)+powf(Cai_old_,2.0f))); //35 real calc_i_NaCa = (((((((k_NaCa*1.0f)/(powf(K_mNa,3.0)+powf(Nao,3.0)))*1.0f)/(K_mCa+Cao))*1.0f)/(1.0f+(k_sat*expf((((eta-1.0f)*V_old_*F)/(R*T))))))*((expf(((eta*V_old_*F)/(R*T)))*powf(Nai_old_,3.0)*Cao)-(expf((((eta-1.0f)*V_old_*F)/(R*T)))*powf(Nao,3.0)*Cai_old_))); //36 real calc_E_CaN = (((R*T)/(2.0f*F))*logf((Cao/Cai_old_))); //38 real calc_E_Na = (((R*T)/F)*logf((((0.9f*Nao)+(0.1f*Ko))/((0.9f*Nai_old_)+(0.1f*Ki_old_))))); //41 real calc_C_Na3 = (1.0f-(O_Na_old_+C_Na1_old_+C_Na2_old_+IF_Na_old_+I1_Na_old_+I2_Na_old_+IC_Na2_old_+IC_Na3_old_)); //42 real calc_alpha_Na11 = (3.802f/((0.1027f*expf(((-(V_old_+2.5f))/17.0f)))+(0.2f*expf(((-(V_old_+2.5f))/150.0f))))); //51 real calc_alpha_Na12 = (3.802f/((0.1027f*expf(((-(V_old_+2.5f))/15.0f)))+(0.23f*expf(((-(V_old_+2.5f))/150.0f))))); //52 real calc_alpha_Na13 = (3.802f/((0.1027f*expf(((-(V_old_+2.5f))/12.0f)))+(0.25f*expf(((-(V_old_+2.5f))/150.0f))))); //53 real calc_beta_Na11 = (0.1917f*expf(((-(V_old_+2.5f))/20.3f))); //54 real calc_beta_Na12 = (0.2f*expf(((-(V_old_-2.5f))/20.3f))); //55 real calc_beta_Na13 = (0.22f*expf(((-(V_old_-7.5f))/20.3f))); //56 real calc_alpha_Na3 = (7e-7f*expf(((-(V_old_+7.0f))/7.7f))); //57 real calc_beta_Na3 = (0.00854f+(0.00002f*V_old_)); //58 real calc_alpha_Na2 = (1.0f/((0.188495f*expf(((-(V_old_+7.0f))/16.6f)))+0.393956f)); //59 real calc_E_K = (((R*T)/F)*logf((Ko/Ki_old_))); //68 real calc_alpha_a = (0.18064f*expf((0.03577f*(V_old_+ 30.0f)))); //71 real calc_beta_a = (0.3956f*expf(((-0.06237f)*(V_old_+ 30.0f)))); //72 real calc_alpha_i = ((0.000152f*expf(((-(V_old_+13.5f))/7.0f)))/((0.067083f*expf(((-(V_old_+33.5f))/7.0f)))+1.0f)); //73 real calc_beta_i = 
((0.00095f*expf(((V_old_+33.5f)/7.0f)))/((0.051335f*expf(((V_old_+33.5f)/7.0f)))+1.0f)); //74 real calc_ass = (1.0f/(1.0f+expf(((-(V_old_+22.5f))/7.7f)))); //78 real calc_iss = (1.0f/(1.0f+expf(((V_old_+45.2f)/5.7f)))); //79 real calc_tau_ta_s = ((0.493f*expf(((-0.0629f)*V_old_)))+2.058f); //80 real calc_tau_ti_s = (270.0f+(1050.0f/(1.0f+expf(((V_old_+45.2f)/5.7f))))); //81 real calc_alpha_n = (V_old_ != -26.5f)?((0.00000481333f*(V_old_+26.5f))/(1.0f-expf(((-0.128f)*(V_old_+26.5f))))): 0.000037604f; //85 real calc_beta_n = (0.0000953333f*expf(((-0.038f)*(V_old_+26.5f)))); //86 real calc_tau_aur = ((0.493f*expf(((-0.0629f)*V_old_)))+2.058f); //90 real calc_tau_iur = (1200.0f-(170.0f/(1.0f+expf(((V_old_+45.2f)/5.7f))))); //91 real calc_tau_Kss = ((39.3f*expf(((-0.0862f)*V_old_)))+13.17f); //95 real calc_i_Kr = (g_Kr*O_K_old_*(V_old_-(((R*T)/F)*logf((((0.98f*Ko)+(0.02f*Nao))/((0.98f*Ki_old_)+(0.02f*Nai_old_))))))); //96 real calc_C_K0 = (1.0f-(C_K1_old_+C_K2_old_+O_K_old_+I_K_old_)); //97 real calc_alpha_a0 = (0.022348f*expf((0.01176f*V_old_))); //102 real calc_beta_a0 = (0.047002f*expf(((-0.0631f)*V_old_))); //103 real calc_alpha_a1 = (0.013733f*expf((0.038198f*V_old_))); //104 real calc_beta_a1 = (0.0000689f*expf(((-0.04178f)*V_old_))); //105 real calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current = (0.090821f*expf((0.023391f*(V_old_+5.0f)))); //106 real calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current = (0.006497f*expf(((-0.03268f)*(V_old_+5.0f)))); //107 real calc_sigma = ((1.0f/7.0f)*(expf((Nao/67300.0f))-1.0f)); //110 real calc_O_ClCa = (0.2f/(1.0f+expf(((-(V_old_-46.7f))/7.8f)))); //112 real calc_beta_Na2 = ((calc_alpha_Na13*calc_alpha_Na2*calc_alpha_Na3)/(calc_beta_Na13*calc_beta_Na3)); //60 real calc_alpha_Na4 = (calc_alpha_Na2/1000.0f); //61 real calc_beta_Na4 = calc_alpha_Na3; //62 real calc_alpha_Na5 = (calc_alpha_Na2/95000.0f); //63 real calc_beta_Na5 = (calc_alpha_Na3/50.0f); //64 real calc_i_Nab = 
(g_Nab*(V_old_-calc_E_Na)); //65 real calc_i_Kto_s = (g_Kto_s*ato_s_old_*ito_s_old_*(V_old_-calc_E_K)); //75 real calc_i_K1 = ((((0.2938f*Ko)/(Ko+210.0f))*(V_old_-calc_E_K))/(1.0f+expf((0.0896f*(V_old_-calc_E_K))))); //82 real calc_i_Ks = (g_Ks*powf(nKs_old_,2.0f)*(V_old_-calc_E_K)); //83 real calc_i_Kur = (g_Kur*aur_old_*iur_old_*(V_old_-calc_E_K)); //87 real calc_i_Kss = (g_Kss*aKss_old_*iKss_old_*(V_old_-calc_E_K)); //92 real calc_i_Cab = (g_Cab*(V_old_-calc_E_CaN)); //37 real calc_i_Na = (g_Na*O_Na_old_*(V_old_-calc_E_Na)); //40 real calc_i_Kto_f = (g_Kto_f*powf(ato_f_old_,3.0)*ito_f_old_*(V_old_-calc_E_K)); //67 real calc_f_NaK = (1.0f/(1.0f+(0.1245f*expf((((-0.1f)*V_old_*F)/(R*T))))+(0.0365f*calc_sigma*expf((((-V_old_)*F)/(R*T)))))); //109 real calc_i_ClCa = (((g_ClCa*calc_O_ClCa*Cai_old_)/(Cai_old_+Km_Cl))*(V_old_-E_Cl)); //111 real calc_i_NaK = ((((i_NaK_max*calc_f_NaK*1.0f)/(1.0f+powf((Km_Nai/Nai_old_),1.5)))*Ko)/(Ko+Km_Ko)); //108 // Differential Equations real d_dt_V = (-(calc_i_CaL+calc_i_pCa+calc_i_NaCa+calc_i_Cab+calc_i_Na+calc_i_Nab+calc_i_NaK+calc_i_Kto_f+calc_i_Kto_s+calc_i_K1+calc_i_Ks+calc_i_Kur+calc_i_Kss+calc_i_Kr+calc_i_ClCa+calc_i_stim)); // 1 real d_dt_Cai = (calc_Bi*((calc_J_leak+calc_J_xfer)-(calc_J_up+calc_J_trpn+((((calc_i_Cab+calc_i_pCa)-(2.0f*calc_i_NaCa))*Acap*Cm)/(2.0f*Vmyo*F))))); // 2 real d_dt_Cass = (calc_Bss*(((calc_J_rel*VJSR)/Vss)-(((calc_J_xfer*Vmyo)/Vss)+((calc_i_CaL*Acap*Cm)/(2.0f*Vss*F))))); // 3 real d_dt_CaJSR = (calc_BJSR*(calc_J_tr-calc_J_rel)); // 4 real d_dt_CaNSR = ((((calc_J_up-calc_J_leak)*Vmyo)/VNSR)-((calc_J_tr*VJSR)/VNSR)); // 5 real d_dt_P_RyR = (((-0.04f)*P_RyR_old_)-(((0.1f*calc_i_CaL)/i_CaL_max)*expf(((-powf((V_old_-5.0f),2.0f))/648.0f)))); // 15 real d_dt_LTRPN_Ca = ((k_plus_ltrpn*Cai_old_*(LTRPN_tot-LTRPN_Ca_old_))-(k_minus_ltrpn*LTRPN_Ca_old_)); // 16 real d_dt_HTRPN_Ca = ((k_plus_htrpn*Cai_old_*(HTRPN_tot-HTRPN_Ca_old_))-(k_minus_htrpn*HTRPN_Ca_old_)); // 17 real d_dt_P_O1 = 
(((k_plus_a*powf(Cass_old_,n)*calc_P_C1)+(k_minus_b*P_O2_old_)+(k_minus_c*P_C2_old_))-((k_minus_a*P_O1_old_)+(k_plus_b*powf(Cass_old_,m)*P_O1_old_)+(k_plus_c*P_O1_old_))); // 18 real d_dt_P_O2 = ((k_plus_b*powf(Cass_old_,m)*P_O1_old_)-(k_minus_b*P_O2_old_)); // 20 real d_dt_P_C2 = ((k_plus_c*P_O1_old_)-(k_minus_c*P_C2_old_)); // 21 real d_dt_O = (((calc_alpha*C4_old_)+(Kpcb*I1_old_)+(0.001f*((calc_alpha*I2_old_)-(calc_Kpcf*O_old_))))-((4.0f*calc_beta*O_old_)+(calc_gamma*O_old_))); // 23 real d_dt_C2 = (((4.0f*calc_alpha*calc_C1)+(2.0f*calc_beta*C3_old_))-((calc_beta*C2_old_)+(3.0f*calc_alpha*C2_old_))); // 25 real d_dt_C3 = (((3.0f*calc_alpha*C2_old_)+(3.0f*calc_beta*C4_old_))-((2.0f*calc_beta*C3_old_)+(2.0f*calc_alpha*C3_old_))); // 26 real d_dt_C4 = (((2.0f*calc_alpha*C3_old_)+(4.0f*calc_beta*O_old_)+(0.01f*((4.0f*Kpcb*calc_beta*I1_old_)-(calc_alpha*calc_gamma*C4_old_)))+(0.002f*((4.0f*calc_beta*I2_old_)-(calc_Kpcf*C4_old_)))+(4.0f*calc_beta*Kpcb*I3_old_))-((3.0f*calc_beta*C4_old_)+(calc_alpha*C4_old_)+(1.0f*calc_gamma*calc_Kpcf*C4_old_))); // 27 real d_dt_I1 = (((calc_gamma*O_old_)+(0.001f*((calc_alpha*I3_old_)-(calc_Kpcf*I1_old_)))+(0.01f*((calc_alpha*calc_gamma*C4_old_)-(4.0f*calc_beta*Kpcb*I1_old_))))-(Kpcb*I1_old_)); // 28 real d_dt_I2 = (((0.001f*((calc_Kpcf*O_old_)-(calc_alpha*I2_old_)))+(Kpcb*I3_old_)+(0.002f*((calc_Kpcf*C4_old_)-(4.0f*calc_beta*I2_old_))))-(calc_gamma*I2_old_)); // 29 real d_dt_I3 = (((0.001f*((calc_Kpcf*I1_old_)-(calc_alpha*I3_old_)))+(calc_gamma*I2_old_)+(1.0f*calc_gamma*calc_Kpcf*C4_old_))-((4.0f*calc_beta*Kpcb*I3_old_)+(Kpcb*I3_old_))); // 30 real d_dt_Nai = (((-(calc_i_Na+calc_i_Nab+(3.0f*calc_i_NaK)+(3.0f*calc_i_NaCa)))*Acap*Cm)/(Vmyo*F)); // 39 real d_dt_C_Na2 = (((calc_alpha_Na11*calc_C_Na3)+(calc_beta_Na12*C_Na1_old_)+(calc_alpha_Na3*IC_Na2_old_))-((calc_beta_Na11*C_Na2_old_)+(calc_alpha_Na12*C_Na2_old_)+(calc_beta_Na3*C_Na2_old_))); // 43 real d_dt_C_Na1 = 
(((calc_alpha_Na12*C_Na2_old_)+(calc_beta_Na13*O_Na_old_)+(calc_alpha_Na3*IF_Na_old_))-((calc_beta_Na12*C_Na1_old_)+(calc_alpha_Na13*C_Na1_old_)+(calc_beta_Na3*C_Na1_old_))); // 44 real d_dt_O_Na = (((calc_alpha_Na13*C_Na1_old_)+(calc_beta_Na2*IF_Na_old_))-((calc_beta_Na13*O_Na_old_)+(calc_alpha_Na2*O_Na_old_))); // 45 real d_dt_IF_Na = (((calc_alpha_Na2*O_Na_old_)+(calc_beta_Na3*C_Na1_old_)+(calc_beta_Na4*I1_Na_old_)+(calc_alpha_Na12*IC_Na2_old_))-((calc_beta_Na2*IF_Na_old_)+(calc_alpha_Na3*IF_Na_old_)+(calc_alpha_Na4*IF_Na_old_)+(calc_beta_Na12*IF_Na_old_))); // 46 real d_dt_I1_Na = (((calc_alpha_Na4*IF_Na_old_)+(calc_beta_Na5*I2_Na_old_))-((calc_beta_Na4*I1_Na_old_)+(calc_alpha_Na5*I1_Na_old_))); // 47 real d_dt_I2_Na = ((calc_alpha_Na5*I1_Na_old_)-(calc_beta_Na5*I2_Na_old_)); // 48 real d_dt_IC_Na2 = (((calc_alpha_Na11*IC_Na3_old_)+(calc_beta_Na12*IF_Na_old_)+(calc_beta_Na3*C_Na2_old_))-((calc_beta_Na11*IC_Na2_old_)+(calc_alpha_Na12*IC_Na2_old_)+(calc_alpha_Na3*IC_Na2_old_))); // 49 real d_dt_IC_Na3 = (((calc_beta_Na11*IC_Na2_old_)+(calc_beta_Na3*calc_C_Na3))-((calc_alpha_Na11*IC_Na3_old_)+(calc_alpha_Na3*IC_Na3_old_))); // 50 real d_dt_Ki = (((-((calc_i_Kto_f+calc_i_Kto_s+calc_i_K1+calc_i_Ks+calc_i_Kss+calc_i_Kur+calc_i_Kr)-(2.0f*calc_i_NaK)))*Acap*Cm)/(Vmyo*F)); // 66 real d_dt_ato_f = ((calc_alpha_a*(1.0f-ato_f_old_))-(calc_beta_a*ato_f_old_)); // 69 real d_dt_ito_f = ((calc_alpha_i*(1.0f-ito_f_old_))-(calc_beta_i*ito_f_old_)); // 70 real d_dt_ato_s = ((calc_ass-ato_s_old_)/calc_tau_ta_s); // 76 real d_dt_ito_s = ((calc_iss-ito_s_old_)/calc_tau_ti_s); // 77 real d_dt_nKs = ((calc_alpha_n*(1.0f-nKs_old_))-(calc_beta_n*nKs_old_)); // 84 real d_dt_aur = ((calc_ass-aur_old_)/calc_tau_aur); // 88 real d_dt_iur = ((calc_iss-iur_old_)/calc_tau_iur); // 89 real d_dt_aKss = ((calc_ass-aKss_old_)/calc_tau_Kss); // 93 real d_dt_iKss = 0.0f; // 94 real d_dt_C_K2 = (((kf*C_K1_old_)+(calc_beta_a1*O_K_old_))-((kb*C_K2_old_)+(calc_alpha_a1*C_K2_old_))); // 98 real d_dt_C_K1 
= (((calc_alpha_a0*calc_C_K0)+(kb*C_K2_old_))-((calc_beta_a0*C_K1_old_)+(kf*C_K1_old_))); // 99 real d_dt_O_K = (((calc_alpha_a1*C_K2_old_)+(calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current*I_K_old_))-((calc_beta_a1*O_K_old_)+(calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current*O_K_old_))); // 100 real d_dt_I_K = ((calc_alpha_i_duplicated_rapid_delayed_rectifier_potassium_current*O_K_old_)-(calc_beta_i_duplicated_rapid_delayed_rectifier_potassium_current*I_K_old_)); // 101 rDY_[0] = d_dt_V; rDY_[1] = d_dt_Cai; rDY_[2] = d_dt_Cass; rDY_[3] = d_dt_CaJSR; rDY_[4] = d_dt_CaNSR; rDY_[5] = d_dt_P_RyR; rDY_[6] = d_dt_LTRPN_Ca; rDY_[7] = d_dt_HTRPN_Ca; rDY_[8] = d_dt_P_O1; rDY_[9] = d_dt_P_O2; rDY_[10] = d_dt_P_C2; rDY_[11] = d_dt_O; rDY_[12] = d_dt_C2; rDY_[13] = d_dt_C3; rDY_[14] = d_dt_C4; rDY_[15] = d_dt_I1; rDY_[16] = d_dt_I2; rDY_[17] = d_dt_I3; rDY_[18] = d_dt_Nai; rDY_[19] = d_dt_C_Na2; rDY_[20] = d_dt_C_Na1; rDY_[21] = d_dt_O_Na; rDY_[22] = d_dt_IF_Na; rDY_[23] = d_dt_I1_Na; rDY_[24] = d_dt_I2_Na; rDY_[25] = d_dt_IC_Na2; rDY_[26] = d_dt_IC_Na3; rDY_[27] = d_dt_Ki; rDY_[28] = d_dt_ato_f; rDY_[29] = d_dt_ito_f; rDY_[30] = d_dt_ato_s; rDY_[31] = d_dt_ito_s; rDY_[32] = d_dt_nKs; rDY_[33] = d_dt_aur; rDY_[34] = d_dt_iur; rDY_[35] = d_dt_aKss; rDY_[36] = d_dt_iKss; rDY_[37] = d_dt_C_K2; rDY_[38] = d_dt_C_K1; rDY_[39] = d_dt_O_K; rDY_[40] = d_dt_I_K; }
conv_direct_hcl_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "convolution_param.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <string.h>

/* Copy an in_h x in_w int8 image into the center of an out_h x out_w buffer,
 * filling the surrounding border (top rows, left/right columns, bottom rows)
 * with the constant value v. */
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left,
                     int8_t v)
{
    int8_t* ptr = input;
    int8_t* outptr = output;

    /* fill top border rows */
    int y = 0;
    for (; y < top; y++)
    {
        for (int x = 0; x < out_w; x++)
            outptr[x] = v;
        outptr += out_w;
    }

    /* fill center rows: left border, image row, right border */
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
            outptr[x] = v;
        if (in_w < 12)
        {
            /* heuristic: for very short rows a plain loop beats memcpy overhead */
            for (; x < (left + in_w); x++)
                outptr[x] = ptr[x - left];
        }
        else
        {
            memcpy(outptr + left, ptr, (size_t)in_w * sizeof(int8_t));
            x += in_w;
        }
        for (; x < out_w; x++)
            outptr[x] = v;

        ptr += in_w;
        outptr += out_w;
    }

    /* fill bottom border rows */
    for (; y < out_h; y++)
    {
        for (int x = 0; x < out_w; x++)
            outptr[x] = v;
        outptr += out_w;
    }
}

/* Shared epilogue for the int8 direct convolutions.
 *
 * For every output element: add the per-channel int32 bias (when present),
 * dequantize with input_scale * kernel_scales[channel], apply the optional
 * activation (activation == 0 -> relu, activation > 0 -> relu6, negative ->
 * none), then requantize with output_scale and saturate to [-127, 127].
 * The per-element float math is identical to staging through a full fp32
 * buffer, so no intermediate buffer is needed. */
static void requant_output_int8(const int32_t* output_int32, int8_t* output_int8, const int32_t* bias_int32,
                                int outch, int out_hw, float input_scale, const float* kernel_scales,
                                float output_scale, int activation, int num_thread)
{
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        const float dequant_scale = input_scale * kernel_scales[i];
        for (int j = 0; j < out_hw; j++)
        {
            int output_off = i * out_hw + j;

            int32_t acc = output_int32[output_off];
            if (bias_int32)
                acc += bias_int32[i];

            float value = (float)acc * dequant_scale;

            if (activation >= 0 && value < 0) /* relu and relu6 both clamp below 0 */
                value = 0;
            if (activation > 0 && value > 6) /* relu6 upper clamp */
                value = 6;

            int32_t data_i32 = (int32_t)(round(value / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }
}

/* 3x3 stride-1 int8 direct convolution (NCHW), parallelized over output
 * channels.  Accumulates into an int32 buffer, then dequantizes/requantizes
 * via requant_output_int8().  Returns 0 on success, -1 on allocation failure. */
static int conv3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                              struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    if (!output_int32)
        return -1;
    memset(output_int32, 0, out_size * sizeof(int32_t));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = bias_tensor ? (int32_t*)bias_tensor->data : NULL;

    /* quantization scales */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;

    const int8_t* kernel = (const int8_t*)weight_tensor->data;

    /* zero-pad the input when padding is requested */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;

    int8_t* input_tmp = input_int8;
    if (!(inh_tmp == inh && inw_tmp == inw))
    {
        input_tmp = (int8_t*)sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
        if (!input_tmp)
        {
            sys_free(output_int32);
            return -1;
        }
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        const int8_t* kernel0 = kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int32_t* outptr0 = out0;

            const int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
            const int8_t* r0 = img0;
            const int8_t* r1 = img0 + inw_tmp;
            const int8_t* r2 = img0 + inw_tmp * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int remain = outw; remain > 0; remain--)
                {
                    int sum0 = 0;
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }
                /* stride 1: skip the 2-column window overhang to reach the next row */
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            kernel0 += 9;
        }
    }

    requant_output_int8(output_int32, output_int8, bias_int32, outch, out_hw, input_scale, kernel_scales,
                        output_scale, param->activation, num_thread);

    sys_free(output_int32);
    if (input_tmp != input_int8)
        sys_free(input_tmp);

    return 0;
}

/* 3x3 stride-2 int8 direct convolution (NCHW).  Same structure as the
 * stride-1 kernel, except the window advances two columns per output and
 * skips one full input row between output rows. */
static int conv3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                              struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    if (!output_int32)
        return -1;
    memset(output_int32, 0, out_size * sizeof(int32_t));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = bias_tensor ? (int32_t*)bias_tensor->data : NULL;

    /* quantization scales */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;

    const int8_t* kernel = (const int8_t*)weight_tensor->data;

    /* zero-pad the input when padding is requested */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;

    int8_t* input_tmp = input_int8;
    if (!(inh_tmp == inh && inw_tmp == inw))
    {
        input_tmp = (int8_t*)sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
        if (!input_tmp)
        {
            sys_free(output_int32);
            return -1;
        }
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* after a row the pointers advanced 2*outw columns; this step covers the
     * remainder of the current row plus one whole skipped row (stride 2) */
    int tailstep = inw_tmp - 2 * outw + inw_tmp;

#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        const int8_t* kernel0 = kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int32_t* outptr0 = out0;

            const int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
            const int8_t* r0 = img0;
            const int8_t* r1 = img0 + inw_tmp;
            const int8_t* r2 = img0 + inw_tmp * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int remain = outw; remain > 0; remain--)
                {
                    int sum0 = 0;
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
            kernel0 += 9;
        }
    }

    requant_output_int8(output_int32, output_int8, bias_int32, outch, out_hw, input_scale, kernel_scales,
                        output_scale, param->activation, num_thread);

    sys_free(output_int32);
    if (input_tmp != input_int8)
        sys_free(input_tmp);

    return 0;
}

/* Node-ops entry point: re-fetch the tensors (shapes may have changed) and
 * dispatch on the stride selected by score(). */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* weight_tensor;
    struct tensor* bias_tensor = NULL;
    struct tensor* output_tensor = NULL;
    int num_thread = exec_graph->num_thread;

    /* set the input data and shape again, in case of reshape or dynamic shape */
    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    if (ir_node->input_num > 2)
        bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct conv_param* conv_param = (struct conv_param*)ir_node->op.param_mem;

    int ret = -1;
    switch (conv_param->stride_h)
    {
        case 1:
            ret = conv3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
            break;
        case 2:
            ret = conv3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
            break;
        default:
            TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", conv_param->stride_h);
    }

    return ret;
}

/* No per-node setup required. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node teardown required. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Claim this op only for non-grouped, non-dilated, symmetric-padding 3x3 int8
 * convolutions with uniform stride 1 or 2; everything else falls back to the
 * reference implementation. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct conv_param* param = (struct conv_param*)exec_node->op.param_mem;
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;

    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int pad_h1 = param->pad_h1;
    int pad_w1 = param->pad_w1;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    /* only support int8 */
    if (input_tensor->data_type != TENGINE_DT_INT8)
        return 0;

    if (group == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1 && dilation_h == 1 && dilation_w == 1 && kernel_h == 3 &&
        kernel_w == 3 && ((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2)))
        return OPS_SCORE_BEST * 2;
    else
        return 0;
}

static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_conv_direct_hcl_x86_op()
{
    return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}

int unregister_conv_direct_hcl_x86_op()
{
    unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
    return 0;
}
softmax-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file softmax-inl.h * \brief */ #ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #define MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #include <algorithm> #include <string> #include <utility> #include <vector> #include <type_traits> #include "../mxnet_op.h" #include "../operator_common.h" #include "../tensor/broadcast_reduce_op.h" using mshadow::red::limits::MinValue; namespace mxnet { namespace op { namespace mxnet_op { struct softmax_fwd { template <typename AType> MSHADOW_XINLINE static AType Map(float a, AType b) { return AType(expf(a) / b); } template <typename AType> MSHADOW_XINLINE static AType Map(double a, AType b) { return AType(exp(a) / b); } }; struct log_softmax_fwd { template <typename DType> MSHADOW_XINLINE static float Map(DType a, float b) { return a - logf(b); } template <typename DType> MSHADOW_XINLINE static double Map(DType a, double b) { return a - log(b); } }; template <typename OP, bool negate, typename AType, typename DType, typename OType, typename IType, int ndim> inline void Softmax(Stream<cpu>* s, DType* in, OType* out, IType* length, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; if (M == 0) return; index_t N = shape.Size() / M; Shape<ndim> 
stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; if (length == nullptr) { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < M; ++j) { val = negate ? -in[base + j * sa] : in[base + j * sa]; if (mmax < val) mmax = val; } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; out[base + j * sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; sum += std::exp((in_val - mmax) / temperature); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; out[base + j * sa] = OP::Map((in_val - mmax) / temperature, sum); } } } } else { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t len = static_cast<index_t>(length[i]); index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < len; ++j) { val = negate ? -in[base + j * sa] : in[base + j * sa]; if (mmax < val) mmax = val; } for (index_t j = len; j < M; ++j) { out[base + j * sa] = OType(0.0f); } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < len; ++j) { in_val = negate ? 
-in[base + j * sa] : in[base + j * sa]; out[base + j * sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; sum += std::exp((in_val - mmax) / temperature); } for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; out[base + j * sa] = OP::Map((in_val - mmax) / temperature, sum); } } } } } struct masked_softmax_where { template <typename DType, int ndim> MSHADOW_XINLINE static void Map(index_t id, DType* out, const bool* cond, const DType* x, const double y, Shape<ndim> data_shape, Shape<ndim> mask_shape) { index_t mask_pos = 0; index_t stride = 1; for (index_t i = ndim - 1, j = id; i >= 0; --i) { auto tmp = j / data_shape[i]; if (mask_shape[i] != 1) { mask_pos += (j - tmp * mask_shape[i]) * stride; } stride *= mask_shape[i]; j = tmp; } KERNEL_ASSIGN(out[id], kWriteTo, (cond[mask_pos] ? x[id] : static_cast<DType>(y))); } }; template <typename OP, bool masked_neg_inf, bool negate, typename AType, typename DType, int ndim> inline void MaskedSoftmax(Stream<cpu>* s, DType* in, DType* out, bool* mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, bool normalize, const OpContext& ctx) { Tensor<cpu, 1, DType> workspace = ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(data_shape.Size()), s); DType* masked_input = TBlob(workspace).dptr<DType>(); double neg = MinValue<DType>(); Kernel<masked_softmax_where, cpu>::Launch( s, data_shape.Size(), masked_input, mask, in, neg, data_shape, mask_shape); int* max_lenghts = nullptr; double masked_value = 0.0; if (masked_neg_inf) masked_value = -INFINITY; Softmax<OP, negate, AType, DType>( s, masked_input, out, max_lenghts, data_shape, axis, temperature); Kernel<masked_softmax_where, cpu>::Launch( s, data_shape.Size(), out, mask, out, masked_value, data_shape, mask_shape); } struct softmax_bwd { template <typename DType, typename AType> MSHADOW_XINLINE static AType 
Map(DType ograd, DType out, AType sum) { return AType(out * (ograd - sum)); } }; struct log_softmax_bwd { template <typename AType> MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) { return AType(ograd - expf(out) * sum); } template <typename AType> MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) { return AType(ograd - exp(out) * sum); } }; template <typename OP1, typename OP2, int Req, bool negate, typename AType, typename DType, typename OType, typename IType, int ndim> inline void SoftmaxGrad(Stream<cpu>* s, OType* out, OType* ograd, DType* igrad, IType* length, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; if (M == 0) return; index_t N = shape.Size() / M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; if (length != nullptr) { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); index_t len = static_cast<index_t>(length[i]); AType sum = AType(0); for (index_t j = 0; j < len; ++j) { sum += OP1::Map(ograd[base + j * sa], out[base + j * sa]); } // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) : OP2::Map(ograd[base + j * sa], out[base + j * sa], sum); final_result = (j < len) ? final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature : OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature; final_result = (j < len) ? 
final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result); } } } } else { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); AType sum = AType(0); for (index_t j = 0; j < M; ++j) { sum += OP1::Map(ograd[base + j * sa], out[base + j * sa]); } // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) : OP2::Map(ograd[base + j * sa], out[base + j * sa], sum); KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature : OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature; KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result); } } } } } template <typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType> inline void MaskedSoftmaxGrad(Stream<cpu>* s, DType* out, DType* ograd, DType* igrad, bool* mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, const OpContext& ctx) { Tensor<cpu, 1, DType> workspace = ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(data_shape.Size()), s); DType* masked_ograd = TBlob(workspace).dptr<DType>(); Kernel<masked_softmax_where, cpu>::Launch( s, data_shape.Size(), masked_ograd, mask, ograd, 0.0, data_shape, mask_shape); int* max_lenghts = nullptr; SoftmaxGrad<OP1, OP2, Req, negate, AType, DType, DType, int, ndim>( s, out, masked_ograd, igrad, max_lenghts, data_shape, axis, temperature); Kernel<masked_softmax_where, cpu>::Launch( s, data_shape.Size(), igrad, mask, igrad, 0.0, data_shape, mask_shape); } #ifdef __CUDACC__ const int softmax_threads_per_block = 512; template <int ndim> MSHADOW_XINLINE index_t 
get_mask_position(const index_t idx, const Shape<ndim>& data_shape, const Shape<ndim>& mask_shape, int axis, index_t* stride_axis) { index_t ret = 0; index_t stride = 1; *stride_axis = 1; #pragma unroll for (index_t i = ndim - 1, j = idx; i >= 0; --i) { auto tmp = j / data_shape[i]; if (i != axis && mask_shape[i] != 1) { ret += (j - tmp * mask_shape[i]) * stride; if (i > axis) *stride_axis *= mask_shape[i]; } stride *= mask_shape[i]; j = tmp; } return ret; } template <bool normalize, int x_bits, typename OP, bool masked_neg_inf, bool negate, typename AType, int ndim, typename DType> __global__ void masked_softmax_kernel(DType* in, DType* out, bool* in_mask, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, Shape<ndim> mask_shape, const double temperature) { extern __shared__ double shared[]; AType* smem = reinterpret_cast<AType*>(shared); // x_size const unsigned x_size = 1 << x_bits; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t sa_mask = 0; index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape, axis, &sa_mask); bool bcst_mask_axis = (mask_shape[axis] == 1); index_t x = threadIdx.x; DType smax = 0.0; if (normalize) { red::maximum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; if (mask_value) smem[x] = ::max(smem[x], negate ? -in[base + i * sa] : in[base + i * sa]); } __syncthreads(); cuda::Reduce1D<red::maximum, x_bits>(smem); __syncthreads(); smax = smem[0]; __syncthreads(); } red::sum::SetInitValue(smem[x]); DType val; for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; if (mask_value) { val = (negate ? 
-in[base + i * sa] : in[base + i * sa]); smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature))); } } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); double masked_value = 0.0; if (masked_neg_inf) masked_value = -INFINITY; for (index_t i = x; i < M; i += x_size) { val = (negate ? -in[base + i * sa] : in[base + i * sa]); bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; out[base + i * sa] = mask_value ? DType(OP::Map((val - smax) / static_cast<DType>(temperature), ssum)) : DType(masked_value); } } template <bool normalize, typename OP, bool masked_neg_inf, bool negate, typename AType, typename LType, typename LTypeMask, typename DType, int ndim> __global__ void masked_softmax_stride1_kernel(const DType* in, DType* out, bool* in_mask, const index_t M, int axis, Shape<ndim> sshape, Shape<ndim> mask_shape, const double temperature, const int rows_per_block, const index_t total_rows, const size_t size_input_shared, const size_t size_mask_shared) { const int entries_per_load = sizeof(LType) / sizeof(DType); const int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool); const int row_length = entries_per_load > 0 ? M / entries_per_load : 0; const int row_length_mask = entries_per_load > 0 ? 
M / entries_per_load_mask : 0; extern __shared__ double shared[]; LType* persistent_storage = reinterpret_cast<LType*>(shared); // rows_per_block * M (DType), aligned to double LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]); // rows_per_block * M (bool), aligned to double AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]); // softmax_threads_per_block const int warp_size = 32; const int threads_per_row = softmax_threads_per_block / rows_per_block; const int my_local_row = threadIdx.x / threads_per_row; const int my_row = blockIdx.x * rows_per_block + my_local_row; if (my_row >= total_rows) return; const int my_id = threadIdx.x % threads_per_row; size_t base = my_row * row_length; index_t pos_mask = 0; index_t stride = mask_shape[axis]; #pragma unroll for (index_t i = axis - 1, j = my_row; i >= 0; --i) { auto tmp = j / sshape[i]; if (mask_shape[i] != 1) { pos_mask += (j - tmp * mask_shape[i]) * stride; stride *= mask_shape[i]; } j = tmp; } const LType* in_aligned = reinterpret_cast<const LType*>(in); for (index_t i = my_id; i < row_length; i += threads_per_row) { persistent_storage[my_local_row * row_length + i] = in_aligned[base + i]; } const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]); for (index_t i = my_id; i < row_length_mask; i += threads_per_row) { mask_shared[my_local_row * row_length_mask + i] = (mask_shape[axis] > 1) ? in_mask_aligned[i] : in_mask_aligned[0]; } DType* row = reinterpret_cast<DType*>(persistent_storage + my_local_row * row_length); bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask); __syncthreads(); DType smax = 0.0; if (normalize) { DType my_max_value; red::maximum::SetInitValue(my_max_value); for (index_t i = my_id; i < M; i += threads_per_row) { if (row_mask[i]) my_max_value = ::max(my_max_value, negate ? 
-row[i] : row[i]); } scratch[threadIdx.x] = my_max_value; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]); } __syncthreads(); } if (my_id < warp_size) { AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return ::max(x, y); }); scratch[threadIdx.x] = my_value; } __syncthreads(); smax = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); } AType my_sum; red::sum::SetInitValue(my_sum); for (index_t i = my_id; i < M; i += threads_per_row) { if (row_mask[i]) { const DType val = (negate ? -row[i] : row[i]); my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature))); } } scratch[threadIdx.x] = my_sum; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] += scratch[threadIdx.x + size]; } __syncthreads(); } if (my_id < warp_size) { AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y; }); scratch[threadIdx.x] = my_value; } __syncthreads(); AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); double masked_value = 0.0; if (masked_neg_inf) masked_value = -INFINITY; for (index_t i = my_id; i < M; i += threads_per_row) { const DType val = (negate ? -row[i] : row[i]); row[i] = row_mask[i] ? 
DType(OP::Map((val - smax) / static_cast<DType>(temperature), ssum)) : DType(masked_value); } __syncthreads(); LType* out_aligned = reinterpret_cast<LType*>(out); for (index_t i = my_id; i < row_length; i += threads_per_row) { out_aligned[base + i] = persistent_storage[my_local_row * row_length + i]; } } template <typename OP, bool masked_neg_inf, bool negate, typename AType, typename DType, typename OType, int ndim> inline void MaskedSoftmax(Stream<gpu>* s, DType* in, OType* out, bool* mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, bool normalize, const OpContext& ctx) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = data_shape[axis]; if (M == 0 || data_shape.Size() == 0) return; index_t N = data_shape.Size() / M; Shape<ndim> stride = calc_stride(data_shape); Shape<ndim> sshape = data_shape; sshape[axis] = 1; const size_t DSize = sizeof(DType); // Using max of 20 kB of shared memory for InputData in the optimized case const size_t max_opt_M = 20 * 1024 / DSize; if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M && std::is_same<DType, OType>::value) { int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType)); int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool)); MXNET_LOAD_TYPE_SWITCH(ltype, LType, { CHECK_LE(sizeof(DType), sizeof(LType)); MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, { CHECK_LE(sizeof(bool), sizeof(LTypeMask)); int rows_per_block = mxnet::common::cuda::get_rows_per_block( M * sizeof(DType) / sizeof(LType), softmax_threads_per_block); // calculate amount shared memory (slots aligned to double) int entries_per_load = entries_per_load = sizeof(LType) / sizeof(DType); int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool); size_t size_input_shared = entries_per_load > 0 ? rows_per_block * M / entries_per_load : 0; size_t size_mask_shared = entries_per_load_mask > 0 ? 
rows_per_block * M / entries_per_load_mask : 0; size_input_shared = ((size_input_shared * sizeof(LType) + sizeof(double) - 1) / sizeof(double)); size_mask_shared = ((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) / sizeof(double)); size_t amount_shared = size_input_shared * sizeof(double) + size_mask_shared * sizeof(double) + softmax_threads_per_block * sizeof(AType); int nblocks = (N + rows_per_block - 1) / rows_per_block; if (normalize) { masked_softmax_stride1_kernel<true, OP, masked_neg_inf, negate, AType, LType, LTypeMask> <<<nblocks, softmax_threads_per_block, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(in, out, mask, M, axis, sshape, mask_shape, temperature, rows_per_block, N, size_input_shared, size_mask_shared); } else { masked_softmax_stride1_kernel<false, OP, masked_neg_inf, negate, AType, LType, LTypeMask> <<<nblocks, softmax_threads_per_block, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(in, out, mask, M, axis, sshape, mask_shape, temperature, rows_per_block, N, size_input_shared, size_mask_shared); } }); }); MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_kernel); } else { size_t amount_shared = x_size * sizeof(AType); if (normalize) { masked_softmax_kernel<true, x_bits, OP, masked_neg_inf, negate, AType, ndim> <<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>( in, out, mask, M, axis, sshape, stride, mask_shape, temperature); } else { masked_softmax_kernel<false, x_bits, OP, masked_neg_inf, negate, AType, ndim> <<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>( in, out, mask, M, axis, sshape, stride, mask_shape, temperature); } MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_kernel); } } template <typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType, typename LTypeMask, typename DType, typename OType, int ndim> __global__ void masked_softmax_stride1_grad_kernel(const OType* out, const OType* ograd, DType* igrad, const bool* in_mask, const index_t M, int 
axis, Shape<ndim> sshape, Shape<ndim> mask_shape, const double temperature, const int rows_per_block, const index_t total_rows, const size_t size_input_shared, const size_t size_mask_shared) { const int entries_per_load = sizeof(LType) / sizeof(DType); const int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool); const int row_length = entries_per_load > 0 ? M / entries_per_load : 0; const int row_length_mask = entries_per_load > 0 ? M / entries_per_load_mask : 0; extern __shared__ double shared[]; LType* persistent_storage = reinterpret_cast<LType*>(shared); // 2 * rows_per_block * M (DType), aligned to double LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]); // rows_per_block * M (bool), aligned to double AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]); // softmax_threads_per_block const int warp_size = 32; const int threads_per_row = softmax_threads_per_block / rows_per_block; const int my_local_row = threadIdx.x / threads_per_row; const int my_row = blockIdx.x * rows_per_block + my_local_row; if (my_row >= total_rows) return; const int my_id = threadIdx.x % threads_per_row; size_t base = my_row * row_length; index_t pos_mask = 0; index_t stride = mask_shape[axis]; #pragma unroll for (index_t i = axis - 1, j = my_row; i >= 0; --i) { auto tmp = j / sshape[i]; if (mask_shape[i] != 1) { pos_mask += (j - tmp * mask_shape[i]) * stride; stride *= mask_shape[i]; } j = tmp; } const LType* out_aligned = reinterpret_cast<const LType*>(out); const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd); for (index_t i = my_id; i < row_length; i += threads_per_row) { persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i]; persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i]; } const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]); for (index_t i = my_id; i < row_length_mask; i += 
threads_per_row) { mask_shared[my_local_row * row_length_mask + i] = (mask_shape[axis] > 1) ? in_mask_aligned[i] : in_mask_aligned[0]; } DType* row = reinterpret_cast<DType*>(persistent_storage + my_local_row * row_length * 2); bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask); __syncthreads(); AType my_sum_value; red::sum::SetInitValue(my_sum_value); for (index_t i = my_id; i < M; i += threads_per_row) { if (row_mask[i]) my_sum_value += OP1::Map(row[i + M], row[i]); } scratch[threadIdx.x] = my_sum_value; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size]; } __syncthreads(); } if (my_id < warp_size) { AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y; }); scratch[threadIdx.x] = my_value; } __syncthreads(); AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); for (index_t i = my_id; i < M; i += threads_per_row) { const DType val = negate ? -OP2::Map(row[i + M], row[i], ssum) : OP2::Map(row[i + M], row[i], ssum); row[i] = row_mask[i] ? 
DType(val / static_cast<DType>(temperature)) : DType(0.0f); if (Req == kAddTo) { row[i] += igrad[my_row * M + i]; } } __syncthreads(); LType* igrad_aligned = reinterpret_cast<LType*>(igrad); for (index_t i = my_id; i < row_length; i += threads_per_row) { igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i]; } } template <int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType> __global__ void masked_softmax_grad_kernel(OType* out, OType* ograd, DType* igrad, const bool* in_mask, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, Shape<ndim> mask_shape, const double temperature) { const unsigned x_size = 1 << x_bits; __shared__ AType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t sa_mask = 0; index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape, axis, &sa_mask); bool bcst_mask_axis = (mask_shape[axis] == 1); index_t x = threadIdx.x; red::sum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; if (mask_value) smem[x] += OP1::Map(ograd[base + i * sa], out[base + i * sa]); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); DType final_result; for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; final_result = negate ? -OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum) : OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum); final_result = mask_value ? 
final_result / static_cast<DType>(temperature) : DType(0.0f); KERNEL_ASSIGN(igrad[base + i * sa], Req, final_result); } } template <typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType> inline void MaskedSoftmaxGrad(Stream<gpu>* s, OType* out, OType* ograd, DType* igrad, bool* mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, const OpContext& ctx) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = data_shape[axis]; if (M == 0 || data_shape.Size() == 0) return; index_t N = data_shape.Size() / M; Shape<ndim> stride = calc_stride(data_shape); Shape<ndim> sshape = data_shape; sshape[axis] = 1; const size_t DSize = sizeof(DType); // Using max of 20 kB of shared memory for InputData in the optimized case const size_t max_opt_M = 20 * 1024 / DSize; if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M && std::is_same<DType, OType>::value) { int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType)); int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool)); MXNET_LOAD_TYPE_SWITCH(ltype, LType, { CHECK_LE(sizeof(DType), sizeof(LType)); MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, { CHECK_LE(sizeof(bool), sizeof(LTypeMask)); int rows_per_block = mxnet::common::cuda::get_rows_per_block( M * sizeof(DType) / sizeof(LType), softmax_threads_per_block); // calculate amount shared memory (slots aligned to double) int entries_per_load = entries_per_load = sizeof(LType) / sizeof(DType); int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool); size_t size_input_shared = entries_per_load > 0 ? rows_per_block * M / entries_per_load : 0; size_t size_mask_shared = entries_per_load_mask > 0 ? 
rows_per_block * M / entries_per_load_mask : 0; size_input_shared = ((2 * size_input_shared * sizeof(LType) + sizeof(double) - 1) / sizeof(double)); size_mask_shared = ((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) / sizeof(double)); size_t amount_shared = size_input_shared * sizeof(double) + size_mask_shared * sizeof(double) + softmax_threads_per_block * sizeof(AType); int nblocks = (N + rows_per_block - 1) / rows_per_block; masked_softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType, LTypeMask> <<<nblocks, softmax_threads_per_block, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(out, ograd, igrad, mask, M, axis, sshape, mask_shape, temperature, rows_per_block, N, size_input_shared, size_mask_shared); }); }); MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_grad_kernel); } else { masked_softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim> <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, mask, M, axis, sshape, stride, mask_shape, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_grad_kernel); } } #endif } // namespace mxnet_op struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> { int axis; dmlc::optional<double> temperature; dmlc::optional<int> dtype; dmlc::optional<bool> use_length; DMLC_DECLARE_PARAMETER(SoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1).describe("The axis along which to compute softmax."); DMLC_DECLARE_FIELD(temperature) .set_default(dmlc::optional<double>()) .describe("Temperature parameter in softmax"); DMLC_DECLARE_FIELD(dtype) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(dmlc::optional<int>()) .describe( "DType of the output in case this can't be inferred. 
" "Defaults to the same as input's dtype if not defined (dtype=None)."); DMLC_DECLARE_FIELD(use_length) .set_default(dmlc::optional<bool>(false)) .describe("Whether to use the length input as a mask over the data input."); } bool operator==(const SoftmaxParam& other) const { return this->axis == other.axis && this->temperature == other.temperature && this->dtype == other.dtype && this->use_length == other.use_length; } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, temperature_s, dtype_s, use_length_s; axis_s << axis; temperature_s << temperature; dtype_s << dtype; use_length_s << use_length; (*dict)["axis"] = axis_s.str(); (*dict)["temperature"] = temperature_s.str(); if (dtype.has_value()) { (*dict)["dtype"] = MXNetTypeWithBool2String(dtype.value()); } else { (*dict)["dtype"] = dtype_s.str(); } (*dict)["use_length"] = use_length_s.str(); } }; struct MaskedSoftmaxParam : public dmlc::Parameter<MaskedSoftmaxParam> { int axis; dmlc::optional<double> temperature; dmlc::optional<bool> normalize; DMLC_DECLARE_PARAMETER(MaskedSoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1).describe("The axis along which to compute softmax."); DMLC_DECLARE_FIELD(temperature) .set_default(dmlc::optional<double>()) .describe("Temperature parameter in softmax"); DMLC_DECLARE_FIELD(normalize) .set_default(dmlc::optional<bool>(true)) .describe("Whether to normalize input data x: x = x - max(x)"); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, temperature_s, normalize_s; axis_s << axis; temperature_s << temperature; normalize_s << normalize; (*dict)["axis"] = axis_s.str(); (*dict)["temperature"] = temperature_s.str(); (*dict)["normalize"] = normalize_s.str(); } }; static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.dtype.has_value() && param.dtype.value() != -1; } static inline 
bool softmax_use_length(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.use_length.value(); } static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U); if (softmax_has_dtype_override(attrs)) { TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value()); type_assign(&(*in_attrs)[0], (*out_attrs)[0]); return true; } else { std::vector<int> tmp = {in_attrs->at(0)}; return ElemwiseType<1, 1>(attrs, &tmp, out_attrs); } } static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { CHECK_EQ(out_attrs->size(), 1U); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U); if (param.use_length.value()) { mxnet::TShape& dshape = in_attrs->at(0); mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1); int j = 0; int axis = param.axis != -1 ? 
param.axis : dshape.ndim() - 1; for (int i = 0; i < dshape.ndim(); ++i) { if (i != axis) { tmp_shape[j++] = dshape[i]; } } SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape); } mxnet::ShapeVector tmp = {in_attrs->at(0)}; return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs); } static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)}; mxnet::ShapeVector dgrad = {out_attrs->at(0)}; bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad); SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]); SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]); SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]); SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]); mxnet::ShapeVector length = {in_attrs->at(2)}; mxnet::ShapeVector lgrad = {out_attrs->at(1)}; res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad)); SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]); SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]); return res; } else { return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs); } } else { return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs); } } static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U); if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U); int in_dtype = (*in_attrs)[1]; int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 
3 : 2]; TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype); if (softmax_use_length(attrs)) { TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2)); } return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 && (!softmax_use_length(attrs) || ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1)); } else { CHECK_EQ(in_attrs->size(), 2U); int out_dtype = (*in_attrs)[1]; TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype); return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1; } } static inline std::vector<std::pair<int, int>> SoftmaxGradOpInplaceOption( const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 1}, {3, 0}}; } else { return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 0}}; } } else { return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}}; } } static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { return softmax_use_length(attrs) ? 
4 : 3; } return 2; } static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { return std::vector<std::string>{"ograd", "data", "length", "output"}; } else { return std::vector<std::string>{"ograd", "data", "output"}; } } else { return std::vector<std::string>{"ograd", "output"}; } } struct SoftmaxFGradient { const char* op_name; std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) const { if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) { return ElemwiseGradUseInOut{op_name}(n, ograds); // NOLINT } else { return ElemwiseGradUseOut{op_name}(n, ograds); // NOLINT } } }; static inline bool MaskedSoftmaxOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1); CHECK_EQ(in_attrs->size(), 2U); std::vector<int> tmp = {in_attrs->at(0)}; return ElemwiseType<1, 1>(attrs, &tmp, out_attrs); } static inline bool MaskedSoftmaxOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_shape, mxnet::ShapeVector* out_shape) { CHECK_EQ(out_shape->size(), 1U); CHECK_EQ(in_shape->size(), 2U); mxnet::TShape& data_shape = (*in_shape)[0]; mxnet::TShape& mask_shape = (*in_shape)[1]; if (!mxnet::ndim_is_known(data_shape) || !mxnet::ndim_is_known(mask_shape)) { return false; } CHECK(data_shape.ndim() == mask_shape.ndim()) << "Number of dimensions in data and mask does not match"; CHECK(data_shape.ndim() > 0) << "Empty tuple is not allowed"; for (int i = 0; i < data_shape.ndim(); ++i) { CHECK(data_shape[i] == mask_shape[i] || mask_shape[i] == 1) << "Mask cannot be broadcasted from " << mask_shape << " to " << data_shape; } SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0)); SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0)); return true; } static inline bool MaskedSoftmaxGradOpShape(const 
// NOTE(review): this chunk opens mid-parameter-list; the function's name and
// leading tokens are above this view (it is the shape-inference hook for the
// masked-softmax gradient op -- confirm against the full header).
                                         nnvm::NodeAttrs& attrs,
                                         mxnet::ShapeVector* in_shape,
                                         mxnet::ShapeVector* out_shape) {
  // One gradient output; three inputs (ograd, mask, forward output).
  CHECK_EQ(out_shape->size(), 1U);
  CHECK_EQ(in_shape->size(), 3U);
  mxnet::TShape& ograd_shape = (*in_shape)[0];
  mxnet::TShape& mask_shape  = (*in_shape)[1];
  // Defer inference until both ranks are known.
  if (!mxnet::ndim_is_known(ograd_shape) || !mxnet::ndim_is_known(mask_shape)) {
    return false;
  }
  CHECK(ograd_shape.ndim() == mask_shape.ndim())
      << "Number of dimensions in data and mask does not match";
  CHECK(ograd_shape.ndim() > 0) << "Empty tuple is not allowed";
  // The mask must be broadcastable to the data shape: every mask dimension is
  // either equal to the corresponding data dimension or 1.
  for (int i = 0; i < ograd_shape.ndim(); ++i) {
    CHECK(ograd_shape[i] == mask_shape[i] || mask_shape[i] == 1)
        << "Mask cannot be broadcasted from " << mask_shape << " to " << ograd_shape;
  }
  // The gradient output shares its shape with ograd (input 0) and the forward
  // output (input 2); propagate in both directions so partial knowledge flows.
  SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0));
  SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(2));
  SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0));
  SHAPE_ASSIGN_CHECK(*in_shape, 2, out_shape->at(0));
  return true;
}

// Dtype inference for the masked-softmax gradient: ograd (input 0), the
// forward output (input 2) and the gradient output all share one dtype; the
// mask (input 1) is left untouched.
static inline bool MaskedSoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                           std::vector<int>* in_attrs,
                                           std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->size(), 3U);
  int data_dtype = (*in_attrs)[0];
  TYPE_ASSIGN_CHECK(*in_attrs, 2, data_dtype);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, data_dtype);
  // Also propagate backwards in case only the output dtype was known.
  data_dtype = (*out_attrs)[0];
  TYPE_ASSIGN_CHECK(*in_attrs, 0, data_dtype);
  return true;
}

// In-place hints: pairs of (input index, output index) whose memory may be
// shared by the executor.
static inline std::vector<std::pair<int, int>> MaskedSoftmaxGradOpInplaceOption(
    const nnvm::NodeAttrs& attrs) {
  return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
}

/*!
 * \brief Forward softmax kernel dispatcher.
 *
 * Dispatches on input dtype, output dtype, optional length dtype, and
 * accumulation precision (AType vs DType) controlled by the
 * MXNET_SAFE_ACCUMULATION environment variable. The axis is compacted so the
 * kernel only ever sees a 2-D or 3-D view of the data.
 */
template <typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  // Nothing to do for null requests or empty tensors.
  if (req[0] == kNullOp || inputs[0].Size() == 0U)
    return;
  CHECK_NE(req[0], kAddTo);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis                  = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature  = param.temperature.has_value() ? param.temperature.value() : 1.0;
  // Collapse dimensions around `axis` so the kernel sees at most 3 dims.
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  bool safe_acc       = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce(
        "MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
        "See https://mxnet.apache.org/api/faq/env_var "
        "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
      int type = kInt32;
      if (param.use_length.value()) {
        CHECK(inputs.size() > 1)
            << "Mask needs to be provided when using softmax with use_length=True.";
        type = inputs[1].type_flag_;
      }
      MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
        IType* mask_ptr = nullptr;
        if (param.use_length.value()) {
          mask_ptr = inputs[1].dptr<IType>();
        }
        if (safe_acc) {
          // Accumulate in the wider AType for numerical safety.
          if (shape.ndim() == 2) {
            Softmax<OP, negate, AType>(ctx.get_stream<xpu>(),
                                       inputs[0].dptr<DType>(),
                                       outputs[0].dptr<OType>(),
                                       mask_ptr,
                                       shape.get<2>(),
                                       axis,
                                       static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, AType>(ctx.get_stream<xpu>(),
                                       inputs[0].dptr<DType>(),
                                       outputs[0].dptr<OType>(),
                                       mask_ptr,
                                       shape.get<3>(),
                                       axis,
                                       static_cast<DType>(temperature));
          }
        } else {
          // Accumulate in the input dtype (faster, less precise for fp16).
          if (shape.ndim() == 2) {
            Softmax<OP, negate, DType>(ctx.get_stream<xpu>(),
                                       inputs[0].dptr<DType>(),
                                       outputs[0].dptr<OType>(),
                                       mask_ptr,
                                       shape.get<2>(),
                                       axis,
                                       static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, DType>(ctx.get_stream<xpu>(),
                                       inputs[0].dptr<DType>(),
                                       outputs[0].dptr<OType>(),
                                       mask_ptr,
                                       shape.get<3>(),
                                       axis,
                                       static_cast<DType>(temperature));
          }
        }
      });
    });
  });
}

/*!
 * \brief Forward masked-softmax kernel dispatcher.
 *
 * inputs[0] is the data, inputs[1] a boolean mask broadcastable to the data
 * shape. Dispatches on dtype, ndim and accumulation precision.
 */
template <typename xpu, typename OP, bool masked_neg_inf, bool negate = false>
void MaskedSoftmaxCompute(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp || inputs[0].Size() == 0U)
    return;
  CHECK_NE(req[0], kAddTo);
  const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed);
  int axis                        = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0;
  bool safe_acc            = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce(
        "MXNET_SAFE_ACCUMULATION=1 is recommended for masked_softmax with "
        "float16 inputs. "
        "See https://mxnet.apache.org/api/faq/env_var "
        "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, {
      bool* mask_ptr = inputs[1].dptr<bool>();
      if (safe_acc) {
        MaskedSoftmax<OP, masked_neg_inf, negate, AType>(ctx.get_stream<xpu>(),
                                                         inputs[0].dptr<DType>(),
                                                         outputs[0].dptr<DType>(),
                                                         mask_ptr,
                                                         inputs[0].shape_.get<ndim>(),
                                                         inputs[1].shape_.get<ndim>(),
                                                         axis,
                                                         temperature,
                                                         param.normalize.value(),
                                                         ctx);
      } else {
        MaskedSoftmax<OP, masked_neg_inf, negate, DType>(ctx.get_stream<xpu>(),
                                                         inputs[0].dptr<DType>(),
                                                         outputs[0].dptr<DType>(),
                                                         mask_ptr,
                                                         inputs[0].shape_.get<ndim>(),
                                                         inputs[1].shape_.get<ndim>(),
                                                         axis,
                                                         temperature,
                                                         param.normalize.value(),
                                                         ctx);
      }
    });
  });
}

#if MXNET_USE_CUDA
// Runtime-compiled (RTC) softmax forward functor; the op name is carried as a
// string and compiled on the fly on the GPU. Definition lives in a .cu file.
struct SoftmaxRTCCompute {
  std::string OP;
  bool negate = false;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};

// Runtime-compiled softmax backward functor (two op names for the grad
// reduction and elementwise steps). Definition lives in a .cu file.
struct SoftmaxRTCGradCompute {
  std::string OP1;
  std::string OP2;
  bool negate = false;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
#endif

/*!
 * \brief Backward softmax kernel dispatcher.
 *
 * The index of the forward output among `inputs` depends on whether the op
 * has a dtype override and/or a length input (see `out_idx` below).
 */
template <typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (softmax_use_length(attrs)) {
    MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
      // The length input gets a zero gradient.
      if (req[1] != kNullOp) {
        mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
            ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
      }
    });
  }
  if (req[0] == kNullOp)
    return;
  const int itype           = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis                  = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature  = param.temperature.has_value() ? param.temperature.value() : 1.0;
  mxnet::TShape shape       = AxisShapeCompact(inputs[0].shape_, &axis, true);
  // Locate the forward output among the inputs: index shifts when the op has
  // a dtype override and/or a length input.
  int out_idx   = softmax_has_dtype_override(attrs) ? 2 : 1;
  out_idx       = softmax_use_length(attrs) ? 3 : out_idx;
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
          IType* length_ptr = nullptr;
          if (softmax_use_length(attrs)) {
            length_ptr = inputs[2].dptr<IType>();
          }
          if (safe_acc) {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(),
                                                        inputs[out_idx].dptr<OType>(),
                                                        inputs[0].dptr<OType>(),
                                                        outputs[0].dptr<DType>(),
                                                        length_ptr,
                                                        shape.get<2>(),
                                                        axis,
                                                        static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(),
                                                        inputs[out_idx].dptr<OType>(),
                                                        inputs[0].dptr<OType>(),
                                                        outputs[0].dptr<DType>(),
                                                        length_ptr,
                                                        shape.get<3>(),
                                                        axis,
                                                        static_cast<DType>(temperature));
            }
          } else {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(),
                                                        inputs[out_idx].dptr<OType>(),
                                                        inputs[0].dptr<OType>(),
                                                        outputs[0].dptr<DType>(),
                                                        length_ptr,
                                                        shape.get<2>(),
                                                        axis,
                                                        static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(),
                                                        inputs[out_idx].dptr<OType>(),
                                                        inputs[0].dptr<OType>(),
                                                        outputs[0].dptr<DType>(),
                                                        length_ptr,
                                                        shape.get<3>(),
                                                        axis,
                                                        static_cast<DType>(temperature));
            }
          }
        });
      });
    });
  });
}

/*!
 * \brief Backward masked-softmax kernel dispatcher.
 *
 * inputs: [ograd, mask, forward output]; outputs: [grad wrt data].
 */
template <typename xpu, typename OP1, typename OP2, bool negate = false>
void MaskedSoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
                              const OpContext& ctx,
                              const std::vector<TBlob>& inputs,
                              const std::vector<OpReqType>& req,
                              const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp)
    return;
  const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed);
  int axis                        = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0;
  bool safe_acc            = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, {
        DType* ograd_ptr = inputs[0].dptr<DType>();
        DType* out_ptr   = inputs[2].dptr<DType>();
        bool* mask_ptr   = inputs[1].dptr<bool>();
        DType* grad_data = outputs[0].dptr<DType>();
        if (safe_acc) {
          MaskedSoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(),
                                                          out_ptr,
                                                          ograd_ptr,
                                                          grad_data,
                                                          mask_ptr,
                                                          inputs[0].shape_.get<ndim>(),
                                                          inputs[1].shape_.get<ndim>(),
                                                          axis,
                                                          static_cast<DType>(temperature),
                                                          ctx);
        } else {
          MaskedSoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(),
                                                          out_ptr,
                                                          ograd_ptr,
                                                          grad_data,
                                                          mask_ptr,
                                                          inputs[0].shape_.get<ndim>(),
                                                          inputs[1].shape_.get<ndim>(),
                                                          axis,
                                                          static_cast<DType>(temperature),
                                                          ctx);
        }
      });
    });
  });
}

}  // namespace op
}  // namespace mxnet

namespace std {
// Hash specialization so SoftmaxParam can key caches/unordered containers.
template <>
struct hash<mxnet::op::SoftmaxParam> {
  size_t operator()(const mxnet::op::SoftmaxParam& val) {
    size_t ret = 0;
    ret        = dmlc::HashCombine(ret, val.axis);
    ret        = dmlc::HashCombine(ret, val.temperature);
    ret        = dmlc::HashCombine(ret, val.dtype);
    ret        = dmlc::HashCombine(ret, val.use_length);
    return ret;
  }
};
}  // namespace std

#endif  // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
CBasedTraversal.h
/**
 * @file CBasedTraversal.h
 * @author C. Menges
 * @date 26.04.2019
 */

#pragma once

#include "autopas/containers/cellPairTraversals/CellPairTraversal.h"
#include "autopas/utils/ArrayMath.h"
#include "autopas/utils/DataLayoutConverter.h"
#include "autopas/utils/ThreeDimensionalMapping.h"

namespace autopas {

/**
 * This class provides the base for traversals using base steps based on cell coloring.
 *
 * Cells of the same color are guaranteed not to interact, so all cells of one
 * color can be processed in parallel before moving on to the next color.
 *
 * @tparam ParticleCell the type of cells
 * @tparam PairwiseFunctor The functor that defines the interaction of two particles.
 * @tparam dataLayout
 * @tparam useNewton3
 * @tparam collapseDepth Set the depth of loop collapsion for OpenMP. Loop variables from outer to inner loop: z,y,x
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
          int collapseDepth = 3>
class CBasedTraversal : public CellPairTraversal<ParticleCell> {
 protected:
  /**
   * Constructor of the CBasedTraversal.
   * @param dims The dimensions of the cellblock, i.e. the number of cells in x,
   * y and z direction.
   * @param pairwiseFunctor The functor that defines the interaction of two particles.
   * @param interactionLength Interaction length (cutoff + skin).
   * @param cellLength cell length.
   */
  explicit CBasedTraversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
                           const double interactionLength, const std::array<double, 3> &cellLength)
      : CellPairTraversal<ParticleCell>(dims),
        _interactionLength(interactionLength),
        _cellLength(cellLength),
        _dataLayoutConverter(pairwiseFunctor) {
    // Number of cells a particle can interact across in each dimension
    // (ceil, since a partial cell still has to be considered).
    for (unsigned int d = 0; d < 3; d++) {
      _overlap[d] = std::ceil(_interactionLength / _cellLength[d]);
    }
  }

  /**
   * Destructor of CBasedTraversal.
   */
  ~CBasedTraversal() override = default;

 public:
  /**
   * load Data Layouts required for this Traversal if cells have been set through setCellsToTraverse().
   */
  void initTraversal() override {
    if (this->_cells) {
      auto &cells = *(this->_cells);
#ifdef AUTOPAS_OPENMP
      // @todo find a condition on when to use omp or when it is just overhead
#pragma omp parallel for
#endif
      for (size_t i = 0; i < cells.size(); ++i) {
        _dataLayoutConverter.loadDataLayout(cells[i]);
      }
    }
  }

  /**
   * write Data to AoS if cells have been set through setCellsToTraverse().
   */
  void endTraversal() override {
    if (this->_cells) {
      auto &cells = *(this->_cells);
#ifdef AUTOPAS_OPENMP
      // @todo find a condition on when to use omp or when it is just overhead
#pragma omp parallel for
#endif
      for (size_t i = 0; i < cells.size(); ++i) {
        _dataLayoutConverter.storeDataLayout(cells[i]);
      }
    }
  }

 protected:
  /**
   * The main traversal of the CTraversal.
   * @tparam LoopBody type of the loop body
   * @param loopBody The body of the loop as a function. Normally a lambda function, that takes as as parameters
   * (x,y,z). If you need additional input from outside, please use captures (by reference).
   * @param end 3D index until interactions are processed (exclusive).
   * @param stride Distance (in cells) to the next cell of the same color.
   * @param offset initial offset (in cells) in which cell to start the traversal.
   */
  template <typename LoopBody>
  inline void cTraversal(LoopBody &&loopBody, const std::array<unsigned long, 3> &end,
                         const std::array<unsigned long, 3> &stride,
                         const std::array<unsigned long, 3> &offset = {0ul, 0ul, 0ul});

  /**
   * This method is called when the color during the traversal has changed.
   *
   * @param newColor The new current color.
   */
  virtual void notifyColorChange(unsigned long newColor){};

  /**
   * Interaction length (cutoff + skin).
   */
  const double _interactionLength;

  /**
   * cell length in CellBlock3D.
   */
  const std::array<double, 3> _cellLength;

  /**
   * overlap of interacting cells. Array allows asymmetric cell sizes.
   */
  std::array<unsigned long, 3> _overlap;

 private:
  /**
   * Data Layout Converter to be used with this traversal
   */
  utils::DataLayoutConverter<PairwiseFunctor, dataLayout> _dataLayoutConverter;
};

template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
          int collapseDepth>
template <typename LoopBody>
inline void CBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, collapseDepth>::cTraversal(
    LoopBody &&loopBody, const std::array<unsigned long, 3> &end, const std::array<unsigned long, 3> &stride,
    const std::array<unsigned long, 3> &offset) {
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel
#endif
  {
    // One color per point of the stride lattice; colors are processed
    // sequentially, cells within a color in parallel (omp for below).
    const unsigned long numColors = stride[0] * stride[1] * stride[2];
    for (unsigned long col = 0; col < numColors; ++col) {
      notifyColorChange(col);
      std::array<unsigned long, 3> startWithoutOffset(utils::ThreeDimensionalMapping::oneToThreeD(col, stride));
      std::array<unsigned long, 3> start(utils::ArrayMath::add(startWithoutOffset, offset));

      // intel compiler demands following:
      const unsigned long start_x = start[0], start_y = start[1], start_z = start[2];
      const unsigned long end_x = end[0], end_y = end[1], end_z = end[2];
      const unsigned long stride_x = stride[0], stride_y = stride[1], stride_z = stride[2];

      if (collapseDepth == 2) {
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(dynamic, 1) collapse(2)
#endif
        for (unsigned long z = start_z; z < end_z; z += stride_z) {
          for (unsigned long y = start_y; y < end_y; y += stride_y) {
            for (unsigned long x = start_x; x < end_x; x += stride_x) {
              // Don't exchange order of execution (x must be last!), it would break other code
              loopBody(x, y, z);
            }
          }
        }
      } else {
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(dynamic, 1) collapse(3)
#endif
        for (unsigned long z = start_z; z < end_z; z += stride_z) {
          for (unsigned long y = start_y; y < end_y; y += stride_y) {
            for (unsigned long x = start_x; x < end_x; x += stride_x) {
              // Don't exchange order of execution (x must be last!), it would break other code
              loopBody(x, y, z);
            }
          }
        }
      }
    }
  }
}
}  // namespace autopas
cpu_resample.h
// Copyright (C) 2016 Gernot Riegler // Institute for Computer Graphics and Vision (ICG) // Graz University of Technology (TU GRAZ) // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // 3. All advertising materials mentioning features or use of this software // must display the following acknowledgement: // This product includes software developed by the ICG, TU GRAZ. // 4. Neither the name of the ICG, TU GRAZ nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
/*
 * Forward resampling: scale each (num x channels) plane of `in` by
 * height_factor/width_factor into `out`, using the separable interpolation
 * kernel `inter_kernel` with `kernel_width` taps scaled by `kernel_scale`.
 * Sampling happens at output pixel centres mapped back into input space.
 */
void TEMPLATE(rv_resample_fwd, T)(TEMPLATE(rv_tensor, T) out, const TEMPLATE(rv_tensor, T) in, T height_factor, T width_factor, T (*inter_kernel)(T), long kernel_width, T kernel_scale) {
  const long in_height = in.height;
  const long in_width = in.width;
  const long plane_count = in.num * in.channels;
  const long out_height = round(height_factor * in_height);
  const long out_width = round(width_factor * in_width);
  /* half the tap count, rounded up; taps run (-half, half] around the centre */
  const long half_taps = ceil(kernel_width / 2.0);

  long plane;
#pragma omp parallel for private(plane)
  for (plane = 0; plane < plane_count; ++plane) {
    long oh;
    for (oh = 0; oh < out_height; ++oh) {
      long ow;
      for (ow = 0; ow < out_width; ++ow) {
        /* map this output pixel centre back into input coordinates */
        T yo = oh + 0.5;
        T xo = ow + 0.5;
        T yi = yo / height_factor;
        T xi = xo / width_factor;

        T acc = 0;
        long ty;
        for (ty = -half_taps + 1; ty <= half_taps; ++ty) {
          long tx;
          for (tx = -half_taps + 1; tx <= half_taps; ++tx) {
            /* input-space tap position (pixel centre near the sample point) */
            T y = round(yi) + ty - 0.5;
            T x = round(xi) + tx - 0.5;
            /* separable kernel weights for this tap */
            T wx = kernel_scale * (*inter_kernel)(kernel_scale * (xi - x));
            T wy = kernel_scale * (*inter_kernel)(kernel_scale * (yi - y));
            /* bounds-checked read from the input plane */
            T sample = TEMPLATE(rv_get_val_hwbounds, T)(in, 0, plane, y, x);
            acc = acc + wy * wx * sample;
          }
        }

        out.data[(plane * out_height + oh) * out_width + ow] = acc;
      }
    }
  }
}

/*
 * Backward pass of the resampling above: scatter each output gradient back
 * onto the input-gradient tensor `in` using the same separable kernel
 * weights. `in` is zeroed first because contributions are accumulated.
 * Planes are independent, so the outer loop is safe to parallelize.
 */
void TEMPLATE(rv_resample_bwd, T)(TEMPLATE(rv_tensor, T) out, const TEMPLATE(rv_tensor, T) in, T height_factor, T width_factor, T (*inter_kernel)(T), long kernel_width, T kernel_scale) {
  const long in_height = in.height;
  const long in_width = in.width;
  const long plane_count = in.num * in.channels;
  const long out_height = round(height_factor * in_height);
  const long out_width = round(width_factor * in_width);
  const long half_taps = ceil(kernel_width / 2.0);

  /* gradients are accumulated below, so clear the buffer first */
  TEMPLATE(rv_zero, T)(in);

  long plane;
#pragma omp parallel for private(plane)
  for (plane = 0; plane < plane_count; ++plane) {
    long oh;
    for (oh = 0; oh < out_height; ++oh) {
      long ow;
      for (ow = 0; ow < out_width; ++ow) {
        T grad = out.data[(plane * out_height + oh) * out_width + ow];
        /* map this output pixel centre back into input coordinates */
        T yo = oh + 0.5;
        T xo = ow + 0.5;
        T yi = yo / height_factor;
        T xi = xo / width_factor;
        long ty;
        for (ty = -half_taps + 1; ty <= half_taps; ++ty) {
          long tx;
          for (tx = -half_taps + 1; tx <= half_taps; ++tx) {
            T y = round(yi) + ty - 0.5;
            T x = round(xi) + tx - 0.5;
            T wx = kernel_scale * (*inter_kernel)(kernel_scale * (xi - x));
            T wy = kernel_scale * (*inter_kernel)(kernel_scale * (yi - y));
            /* clamp the tap to valid input coordinates before scattering */
            long xc = TEMPLATE(rv_clamp_coord, T)(x, in_width);
            long yc = TEMPLATE(rv_clamp_coord, T)(y, in_height);
            long idx = (plane * in_height + yc) * in_width + xc;
            in.data[idx] = in.data[idx] + wy * wx * grad;
          }
        }
      }
    }
  }
}