tensor_convert.h
/* Copyright 2019 The Blueoil Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef DLK_TENSOR_CONVERT_H_INCLUDED
#define DLK_TENSOR_CONVERT_H_INCLUDED

#include "global.h"
#include "tensor_view.h"
#include "time_measurement.h"
#include "func/impl/quantized_conv2d_kn2row.h"
#include "func/impl/quantized_conv2d_tiling.h"

#ifdef USE_NEON
#include <arm_neon.h>
#endif

#ifdef _OPENMP
#include <omp.h>
#endif

inline void convert_tensor(const TensorView<BIN_CONV_OUTPUT, MemoryLayout::HWC>& before,
                           const TensorView<BIN_CONV_OUTPUT, MemoryLayout::ChHWCl>& after) {
  const auto in_shape = before.get_shape();
  const auto in_height = in_shape[0];
  const auto in_width = in_shape[1];
  const auto out_shape = after.get_shape();
  const auto channel_high = out_shape[0];
  const auto channel_low = out_shape[3];
  Measurement::Start("Convert Tensor");
  for (std::size_t dh = 0; dh < channel_high; ++dh)
    for (std::size_t r = 0; r < in_height; ++r)
      for (std::size_t c = 0; c < in_width; ++c)
        for (std::size_t dl = 0; dl < channel_low; ++dl)
          after(dh, r, c, dl) = before(r, c, dh * channel_low + dl);
  Measurement::Stop();
}

inline void convert_tensor(const TensorView<QUANTIZED_PACKED, MemoryLayout::HWChBCl>& before,
                           const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& after) {
  const auto in_shape = before.get_shape();
  const auto height = in_shape[0];
  const auto width = in_shape[1];
  const auto channel = in_shape[2];
  const auto bits = in_shape[3];
  Measurement::Start("Convert Tensor");
#pragma omp parallel for
  for (std::size_t i = 0; i < height; ++i)
    for (std::size_t j = 0; j < width; ++j)
      for (std::size_t k = 0; k < channel; ++k) {
        const auto idx_before = i * width * channel * bits + j * channel * bits + k * bits;
        const auto idx_after = k * height * width * bits + i * width * bits + j * bits;
#ifdef AARCH32
        const auto tmp = vld1_u32(reinterpret_cast<uint32_t*>(before.data() + idx_before));
        vst1_u32(reinterpret_cast<uint32_t*>(after.data() + idx_after), tmp);
#else
        *reinterpret_cast<uint64_t*>(after.data() + idx_after)
            = *reinterpret_cast<uint64_t*>(before.data() + idx_before);
#endif
      }
  Measurement::Stop();
}

inline void convert_tensor(const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& before,
                           const TensorView<QUANTIZED_PACKED, MemoryLayout::HWChBCl>& after) {
  const auto in_shape = before.get_shape();
  const auto height = in_shape[1];
  const auto width = in_shape[2];
  const auto channel = in_shape[0];
  const auto bits = in_shape[3];
  Measurement::Start("Convert Tensor");
#pragma omp parallel for
  for (std::size_t i = 0; i < height; ++i)
    for (std::size_t j = 0; j < width; ++j)
      for (std::size_t k = 0; k < channel; ++k) {
        const auto idx_before = k * height * width * bits + i * width * bits + j * bits;
        const auto idx_after = i * width * channel * bits + j * channel * bits + k * bits;
#ifdef AARCH32
        const auto tmp = vld1_u32(reinterpret_cast<uint32_t*>(before.data() + idx_before));
        vst1_u32(reinterpret_cast<uint32_t*>(after.data() + idx_after), tmp);
#else
        *reinterpret_cast<uint64_t*>(after.data() + idx_after)
            = *reinterpret_cast<uint64_t*>(before.data() + idx_before);
#endif
      }
  Measurement::Stop();
}

inline void convert_tensor(const TensorView<QUANTIZED_NOT_PACKED, MemoryLayout::NHWC>& before,
                           const dlk::impl::tiling_input_t& after) {
  dlk::impl::pack_input_for_tiling(before, after);
}

template <typename T, MemoryLayout layout>
void convert_tensor(const TensorView<T, layout>& before,
                    const TensorView<T, layout>& after) {
  const auto num_elems = before.size();
  Measurement::Start("Convert Tensor");
#ifdef _OPENMP
  const auto num_threads = omp_get_max_threads();
  const auto chunk_size = (num_elems + num_threads - 1) / num_threads;
#pragma omp parallel for
  for (int i = 0; i < num_elems; i += chunk_size) {
    std::copy(before.data() + i,
              before.data() + std::min(i + chunk_size, num_elems),
              after.data() + i);
  }
#else
  std::copy(before.data(), before.data() + num_elems, after.data());
#endif
  Measurement::Stop();
}

#endif
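The first overload above is a pure index remapping: the flat HWC channel index is split into a high/low pair (ChHWCl). A minimal standalone sketch of the same remapping on plain C arrays, without TensorView (the function name and the int16_t stand-in element type are illustrative, not from the source):

#include <stddef.h>
#include <stdint.h>

/* HWC -> ChHWCl on flat buffers: channel ch is split into
 * (ch / channel_low, ch % channel_low), mirroring
 * after(dh, r, c, dl) = before(r, c, dh * channel_low + dl). */
static void hwc_to_chhwcl(const int16_t *before, int16_t *after,
                          size_t height, size_t width,
                          size_t channel_high, size_t channel_low) {
  for (size_t dh = 0; dh < channel_high; ++dh)
    for (size_t r = 0; r < height; ++r)
      for (size_t c = 0; c < width; ++c)
        for (size_t dl = 0; dl < channel_low; ++dl) {
          size_t src = (r * width + c) * (channel_high * channel_low)
                     + dh * channel_low + dl;
          size_t dst = ((dh * height + r) * width + c) * channel_low + dl;
          after[dst] = before[src];
        }
}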
opencl_pgpdisk_fmt_plug.c
/*
 * Format for brute-forcing PGP Virtual Disk images.
 *
 * This software is Copyright (c) 2017 Dhiru Kholia <dhiru at openwall.net> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_pgpdisk;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_pgpdisk);
#else

#include <stdint.h>
#include <string.h>
#include <openssl/cast.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "aes.h"
#include "twofish.h"
#include "sha.h"
#include "common-opencl.h"
#include "options.h"
#include "pgpdisk_common.h"

#define FORMAT_LABEL            "pgpdisk-opencl"
#define ALGORITHM_NAME          "SHA1 OpenCL"
#define BINARY_SIZE             16
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)
#define PLAINTEXT_LENGTH        124
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1001

typedef struct {
	uint32_t length;
	uint8_t v[PLAINTEXT_LENGTH];
} pgpdisk_password;

typedef struct {
	uint8_t v[32];
} pgpdisk_hash;

typedef struct {
	uint32_t saltlen;
	uint32_t iterations;
	uint32_t key_len;
	uint8_t salt[16];
} pgpdisk_salt;

static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)];
static struct custom_salt *cur_salt;
static cl_int cl_error;
static pgpdisk_password *inbuffer;
static pgpdisk_hash *outbuffer;
static pgpdisk_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;

size_t insize, outsize, settingsize;

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"

static const char *warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};

static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(pgpdisk_password) * gws;
	outsize = sizeof(pgpdisk_hash) * gws;
	settingsize = sizeof(pgpdisk_salt);

	crypt_out = mem_calloc(gws, sizeof(*crypt_out));
	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);

	// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in),
	               "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out),
	               "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting),
	               "Error while setting mem_salt kernel argument");
}

static void release_clobj(void)
{
	if (inbuffer) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
	}
}

static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
	Twofish_initialise();
}

static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts), "-DPLAINTEXT_LENGTH=%d", PLAINTEXT_LENGTH);
		opencl_init("$JOHN/kernels/pgpdisk_kernel.cl", gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "pgpdisk", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj,
		                       release_clobj, sizeof(pgpdisk_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 300);
	}
}

static void done(void)
{
	if (autotuned) {
		release_clobj();
		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
		autotuned--;
	}
}

static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		uint32_t dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	currentsalt.iterations = cur_salt->iterations;
	if (cur_salt->algorithm == 3) {
		currentsalt.key_len = 16;
		currentsalt.saltlen = 8;
	} else {
		currentsalt.key_len = 32;
		currentsalt.saltlen = 16;
	}
	memcpy((char*)currentsalt.salt, cur_salt->salt, currentsalt.saltlen);

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0,
	               settingsize, &currentsalt, 0, NULL, NULL),
	               "Copy setting to gpu");
}

#undef set_key
static void set_key(char *key, int index)
{
	uint32_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint32_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';

	return ret;
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize,
	              inbuffer, 0, NULL, multi_profilingEvent[0]),
	              "Copy data to gpu");

	// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL,
	              &global_work_size, lws, 0, NULL, multi_profilingEvent[1]),
	              "Run kernel");

	// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize,
	              outbuffer, 0, NULL, multi_profilingEvent[2]),
	              "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char key[40];

		memcpy(key, outbuffer[index].v, 32);
		if (cur_salt->algorithm == 5 || cur_salt->algorithm == 6 ||
		    cur_salt->algorithm == 7) {
			AES_KEY aes_key;

			AES_set_encrypt_key(key, 256, &aes_key);
			AES_ecb_encrypt(key, (unsigned char*)crypt_out[index], &aes_key, AES_ENCRYPT);
		} else if (cur_salt->algorithm == 4) {
			Twofish_key tkey;

			Twofish_prepare_key(key, 32, &tkey);
			Twofish_encrypt(&tkey, key, (unsigned char*)crypt_out[index]);
		} else if (cur_salt->algorithm == 3) {
			CAST_KEY ck;

			CAST_set_key(&ck, 16, key);
			memset((unsigned char*)crypt_out[index], 0, BINARY_SIZE);
			CAST_ecb_encrypt(key, (unsigned char*)crypt_out[index], &ck, CAST_ENCRYPT);
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_opencl_pgpdisk = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		pgpdisk_tests,
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		pgpdisk_common_valid,
		fmt_default_split,
		get_binary,
		pgpdisk_common_get_salt,
		{ 0 },
		fmt_default_source,
		{ fmt_default_binary_hash },
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ fmt_default_get_hash },
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
GB_unaryop__ainv_uint16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_uint16_uint64
// op(A') function: GB_tran__ainv_uint16_uint64

// C type:   uint16_t
// A type:   uint64_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)            \
{                                    \
    /* aij = Ax [pA] */              \
    GB_GETA (aij, Ax, pA) ;          \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (x, aij) ;            \
    GB_OP (GB_CX (pC), x) ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint16_uint64
(
    uint16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
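Stripped of the macro layers, the generated apply kernel is an elementwise cast-then-negate: each uint64_t entry is truncated to 16 bits and negated modulo 2^16. A minimal standalone sketch of that semantics (the function name is illustrative, not part of the GraphBLAS API):

#include <stdint.h>
#include <stdio.h>

/* Elementwise cij = -(uint16_t) aij, i.e. the additive inverse modulo 2^16
 * after truncating the 64-bit input, as in GB_unop__ainv_uint16_uint64. */
static void ainv_u16_from_u64(uint16_t *Cx, const uint64_t *Ax, int64_t anz) {
  for (int64_t p = 0; p < anz; p++) {
    uint16_t x = (uint16_t) Ax[p];  /* cast: keep the low 16 bits */
    Cx[p] = (uint16_t) (-x);        /* unaryop: wraps, e.g. 1 -> 65535 */
  }
}

int main(void) {
  uint64_t a[3] = { 1, 0x10003, 0 };
  uint16_t c[3];
  ainv_u16_from_u64(c, a, 3);
  printf("%u %u %u\n", c[0], c[1], c[2]);  /* prints: 65535 65533 0 */
  return 0;
}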
als.c
#include "completion.h" #include "../csf.h" #include "../util.h" #include "../io.h" #include "../sort.h" #include <math.h> #include <omp.h> #include <time.h> #include <sys/time.h> /* TODO: Conditionally include this OR define lapack prototypes below? * What does this offer beyond prototypes? Can we detect at compile time * if we are using MKL vs ATLAS, etc.? */ //#include <mkl.h> /* Use hardcoded 3-mode kernels when possible. Results in small speedups. */ #ifndef USE_3MODE_OPT #define USE_3MODE_OPT 0 #endif /****************************************************************************** * LAPACK PROTOTYPES *****************************************************************************/ /* * TODO: Can this be done in a better way? */ #if SPLATT_VAL_TYPEWIDTH == 32 void spotrf_(char *, int *, float *, int *, int *); void spotrs_(char *, int *, int *, float *, int *, float *, int *, int *); void ssyrk_(char *, char *, int *, int *, char *, char *, int *, char *, char *, int *); #define LAPACK_DPOTRF spotrf_ #define LAPACK_DPOTRS spotrs_ #define LAPACK_DSYRK ssyrk_ #else void dpotrf_(char *, int *, double *, int *, int *); void dpotrs_(char *, int *, int *, double *, int *, double *, int *, int *); void dsyrk_(char *, char *, int *, int *, double *, double *, int *, double *, double *, int *); #define LAPACK_DPOTRF dpotrf_ #define LAPACK_DPOTRS dpotrs_ #define LAPACK_DSYRK dsyrk_ #endif /****************************************************************************** * PRIVATE FUNCTIONS *****************************************************************************/ static inline void p_add_hada_clear( val_t * const restrict accum, val_t * const restrict toclear, val_t const * const restrict b, idx_t const nfactors) { for(idx_t f=0; f < nfactors; ++f) { accum[f] += toclear[f] * b[f]; toclear[f] = 0; } } /** * @brief Compute the Cholesky decomposition of the normal equations and solve * for out_row. We only compute the upper-triangular portion of 'neqs', * so work with the lower-triangular portion when column-major * (for Fortran). * * @param neqs The NxN normal equations. * @param[out] out_row The RHS of the equation. Updated in place. * @param N The rank of the problem. */ static inline void p_invert_row( val_t * const restrict neqs, val_t * const restrict out_row, idx_t const N) { char uplo = 'L'; int order = (int) N; int lda = (int) N; int info; LAPACK_DPOTRF(&uplo, &order, neqs, &lda, &info); if(info) { fprintf(stderr, "SPLATT: DPOTRF returned %d\n", info); } int nrhs = 1; int ldb = (int) N; LAPACK_DPOTRS(&uplo, &order, &nrhs, neqs, &lda, out_row, &ldb, &info); if(info) { fprintf(stderr, "SPLATT: DPOTRS returned %d\n", info); } } /** * @brief Compute DSYRK: out += A^T * A, a rank-k update. Only compute * the upper-triangular portion. * * @param A The input row(s) to update with. * @param N The length of 'A'. * @param nvecs The number of rows in 'A'. * @param nflush Then number of times this has been performed (this slice). * @param[out] out The NxN matrix to update. */ static inline void p_vec_oprod( val_t * const restrict A, idx_t const N, idx_t const nvecs, idx_t const nflush, val_t * const restrict out) { char uplo = 'L'; char trans = 'N'; int order = (int) N; int k = (int) nvecs; int lda = (int) N; int ldc = (int) N; val_t alpha = 1; val_t beta = (nflush == 0) ? 0. 
: 1.; LAPACK_DSYRK(&uplo, &trans, &order, &k, &alpha, A, &lda, &beta, out, &ldc); } static void p_process_tile3( splatt_csf const * const csf, idx_t const tile, tc_model * const model, tc_ws * const ws, thd_info * const thd_densefactors, int const tid) { csf_sparsity const * const pt = csf->pt + tile; /* empty tile */ if(pt->vals == 0) { return; } idx_t const nfactors = model->rank; idx_t const * const restrict sptr = pt->fptr[0]; idx_t const * const restrict fptr = pt->fptr[1]; idx_t const * const restrict fids = pt->fids[1]; idx_t const * const restrict inds = pt->fids[2]; val_t const * const restrict avals = model->factors[csf->dim_perm[1]]; val_t const * const restrict bvals = model->factors[csf->dim_perm[2]]; val_t const * const restrict vals = pt->vals; /* buffers */ val_t * const restrict accum = ws->thds[tid].scratch[1]; val_t * const restrict mat_accum = ws->thds[tid].scratch[3]; /* update each slice */ idx_t const nslices = pt->nfibs[0]; for(idx_t i=0; i < nslices; ++i) { /* fid is the row we are actually updating */ idx_t const fid = (pt->fids[0] == NULL) ? i : pt->fids[0][i]; /* replicated structures */ val_t * const restrict out_row = (val_t *) thd_densefactors[tid].scratch[0] + (fid * nfactors); val_t * const restrict neqs = (val_t *) thd_densefactors[tid].scratch[1] + (fid*nfactors*nfactors); idx_t bufsize = 0; /* how many hada vecs are in mat_accum */ idx_t nflush = 1; /* how many times we have flushed to add to the neqs */ val_t * restrict hada = mat_accum; /* process each fiber */ for(idx_t fib=sptr[i]; fib < sptr[i+1]; ++fib) { val_t const * const restrict av = avals + (fids[fib] * nfactors); /* first entry of the fiber is used to initialize accum */ idx_t const jjfirst = fptr[fib]; val_t const vfirst = vals[jjfirst]; val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accum[r] = vfirst * bv[r]; hada[r] = av[r] * bv[r]; } hada += nfactors; if(++bufsize == ALS_BUFSIZE) { /* add to normal equations */ p_vec_oprod(mat_accum, nfactors, bufsize, nflush++, neqs); hada = mat_accum; bufsize = 0; } /* foreach nnz in fiber */ for(idx_t jj=fptr[fib]+1; jj < fptr[fib+1]; ++jj) { val_t const v = vals[jj]; val_t const * const restrict bv = bvals + (inds[jj] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accum[r] += v * bv[r]; hada[r] = av[r] * bv[r]; } hada += nfactors; if(++bufsize == ALS_BUFSIZE) { /* add to normal equations */ p_vec_oprod(mat_accum, nfactors, bufsize, nflush++, neqs); hada = mat_accum; bufsize = 0; } } /* accumulate into output row */ for(idx_t r=0; r < nfactors; ++r) { out_row[r] += accum[r] * av[r]; } } /* foreach fiber */ /* final flush */ p_vec_oprod(mat_accum, nfactors, bufsize, nflush++, neqs); } /* foreach slice */ } static void p_process_slice( splatt_csf const * const csf, idx_t const tile, idx_t const i, val_t * * mvals, idx_t const nfactors, val_t * const restrict out_row, val_t * const accum, val_t * const restrict neqs, val_t * const restrict neqs_buf, val_t * const neqs_buf_tree, idx_t * const nflush, double *mttkrp_time); static void p_process_tile( splatt_csf const * const csf, idx_t const tile, tc_model * const model, tc_ws * const ws, thd_info * const thd_densefactors, int const tid, double *mttkrp_time) { csf_sparsity const * const pt = csf->pt + tile; /* empty tile */ if(pt->vals == 0) { return; } idx_t const nmodes = csf->nmodes; #if USE_3MODE_OPT if(nmodes == 3) { p_process_tile3(csf, tile, model, ws, thd_densefactors, tid); return; } #endif idx_t const nfactors = model->rank; /* buffers 
*/ val_t * const restrict accum = ws->thds[tid].scratch[1]; val_t * const restrict mat_accum = ws->thds[tid].scratch[3]; val_t * const restrict hada_accum = ws->thds[tid].scratch[4]; val_t * mvals[MAX_NMODES]; for(idx_t m=0; m < nmodes; ++m) { mvals[m] = model->factors[csf->dim_perm[m]]; } /* update each slice */ idx_t const nslices = pt->nfibs[0]; for(idx_t i=0; i < nslices; ++i) { /* fid is the row we are actually updating */ idx_t const fid = (pt->fids[0] == NULL) ? i : pt->fids[0][i]; /* replicated structures */ val_t * const restrict out_row = (val_t *) thd_densefactors[tid].scratch[0] + (fid * nfactors); val_t * const restrict neqs = (val_t *) thd_densefactors[tid].scratch[1] + (fid*nfactors*nfactors); idx_t bufsize = 0; /* how many hada vecs are in mat_accum */ idx_t nflush = 1; /* how many times we have flushed to add to the neqs */ val_t * restrict hada = mat_accum; /* process each fiber */ p_process_slice(csf, tile, i, mvals, nfactors, out_row, accum, neqs, mat_accum, hada_accum, &nflush, mttkrp_time); } /* foreach slice */ } static void p_process_slice3( splatt_csf const * const csf, idx_t const tile, idx_t const i, val_t const * const restrict A, val_t const * const restrict B, idx_t const nfactors, val_t * const restrict out_row, val_t * const restrict accum, val_t * const restrict neqs, val_t * const restrict neqs_buf, idx_t * const nflush) { csf_sparsity const * const pt = csf->pt + tile; idx_t const * const restrict sptr = pt->fptr[0]; idx_t const * const restrict fptr = pt->fptr[1]; idx_t const * const restrict fids = pt->fids[1]; idx_t const * const restrict inds = pt->fids[2]; val_t const * const restrict vals = pt->vals; val_t * hada = neqs_buf; idx_t bufsize = 0; /* process each fiber */ for(idx_t fib=sptr[i]; fib < sptr[i+1]; ++fib) { val_t const * const restrict av = A + (fids[fib] * nfactors); /* first entry of the fiber is used to initialize accum */ idx_t const jjfirst = fptr[fib]; val_t const vfirst = vals[jjfirst]; val_t const * const restrict bv = B + (inds[jjfirst] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accum[r] = vfirst * bv[r]; hada[r] = av[r] * bv[r]; } hada += nfactors; if(++bufsize == ALS_BUFSIZE) { /* add to normal equations */ p_vec_oprod(neqs_buf, nfactors, bufsize, (*nflush)++, neqs); bufsize = 0; hada = neqs_buf; } /* foreach nnz in fiber */ for(idx_t jj=fptr[fib]+1; jj < fptr[fib+1]; ++jj) { val_t const v = vals[jj]; val_t const * const restrict bv = B + (inds[jj] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accum[r] += v * bv[r]; hada[r] = av[r] * bv[r]; } hada += nfactors; if(++bufsize == ALS_BUFSIZE) { /* add to normal equations */ p_vec_oprod(neqs_buf, nfactors, bufsize, (*nflush)++, neqs); bufsize = 0; hada = neqs_buf; } } /* accumulate into output row */ for(idx_t r=0; r < nfactors; ++r) { out_row[r] += accum[r] * av[r]; } } /* foreach fiber */ /* final flush */ p_vec_oprod(neqs_buf, nfactors, bufsize, (*nflush)++, neqs); } static void p_process_slice( splatt_csf const * const csf, idx_t const tile, idx_t const i, val_t * * mvals, idx_t const nfactors, val_t * const restrict out_row, val_t * const accum, val_t * const restrict neqs, val_t * const restrict neqs_buf, val_t * const neqs_buf_tree, idx_t * const nflush, double *mttkrp_time) { struct timeval start_t, stop_t; idx_t const nmodes = csf->nmodes; csf_sparsity const * const pt = csf->pt + tile; val_t const * const restrict vals = pt->vals; if(vals == NULL) { return; } #if USE_3MODE_OPT if(nmodes == 3) { p_process_slice3(csf, tile, i, mvals[1], mvals[2], nfactors, out_row, 
accum, neqs, neqs_buf, nflush); return; } #endif idx_t const * const * const restrict fp = (idx_t const * const *) pt->fptr; idx_t const * const * const restrict fids = (idx_t const * const *) pt->fids; idx_t const * const restrict inds = fids[nmodes-1]; val_t const * const restrict lastmat = mvals[nmodes-1]; idx_t bufsize = 0; val_t * hada = neqs_buf; gettimeofday(&start_t, NULL); /* push initial idx initialize idxstack */ idx_t idxstack[MAX_NMODES]; idxstack[0] = i; for(idx_t m=1; m < nmodes-1; ++m) { idxstack[m] = fp[m-1][idxstack[m-1]]; } idx_t const top_id = (pt->fids[0] == NULL) ? i : pt->fids[0][i]; val_t const * const restrict rootrow = mvals[0] + (top_id * nfactors); for(idx_t f=0; f < nfactors; ++f) { neqs_buf_tree[f] = 1.; } /* clear out accumulation buffer */ for(idx_t f=0; f < nfactors; ++f) { accum[f + nfactors] = 0; } /* process each subtree */ idx_t depth = 0; while(idxstack[1] < fp[0][i+1]) { /* move down to nnz node while forming hada */ for(; depth < nmodes-2; ++depth) { val_t const * const restrict drow = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors); val_t const * const restrict cur_buf = neqs_buf_tree + ((depth+0) * nfactors); val_t * const restrict nxt_buf = neqs_buf_tree + ((depth+1) * nfactors); for(idx_t f=0; f < nfactors; ++f) { nxt_buf[f] = cur_buf[f] * drow[f]; } } val_t * const restrict last_hada = neqs_buf_tree + (depth * nfactors); val_t * const restrict accum_nnz = accum + ((depth+1) * nfactors); /* process all nonzeros [start, end) */ idx_t const start = fp[depth][idxstack[depth]]; idx_t const end = fp[depth][idxstack[depth]+1]; for(idx_t jj=start; jj < end; ++jj) { val_t const v = vals[jj]; val_t const * const restrict lastrow = lastmat + (inds[jj] * nfactors); /* process nnz */ for(idx_t f=0; f < nfactors; ++f) { accum_nnz[f] += v * lastrow[f]; hada[f] = last_hada[f] * lastrow[f]; } /* add to normal equations */ hada += nfactors; if(++bufsize == ALS_BUFSIZE) { p_vec_oprod(neqs_buf, nfactors, bufsize, (*nflush)++, neqs); bufsize = 0; hada = neqs_buf; } } idxstack[depth+1] = end; /* propagate MTTKRP up */ do { val_t const * const restrict fibrow = mvals[depth] + (fids[depth][idxstack[depth]] * nfactors); val_t * const restrict up = accum + ((depth+0) * nfactors); val_t * const restrict down = accum + ((depth+1) * nfactors); /* * up[:] += down[:] * fibrow[:]; * down[:] = 0.; */ p_add_hada_clear(up, down, fibrow, nfactors); ++idxstack[depth]; --depth; } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]); } /* foreach fiber subtree */ /* accumulate into output row */ for(idx_t f=0; f < nfactors; ++f) { out_row[f] += accum[f + nfactors]; } gettimeofday(&stop_t, NULL); *mttkrp_time += (stop_t.tv_sec + stop_t.tv_usec/1000000.0) - (start_t.tv_sec + start_t.tv_usec/1000000.0); /* final flush */ p_vec_oprod(neqs_buf, nfactors, bufsize, (*nflush)++, neqs); } /** * @brief Compute the i-ith row of the MTTKRP, form the normal equations, and * store the new row. * * @param csf The tensor of training data. * @param tile The tile that row i resides in. * @param i The row to update. * @param reg Regularization parameter for the i-th row. * @param model The model to update * @param ws Workspace. * @param tid OpenMP thread id. 
*/ static void p_update_slice( splatt_csf const * const csf, idx_t const tile, idx_t const i, val_t const reg, tc_model * const model, tc_ws * const ws, int const tid, double *solving_time, double *mttkrp_time) { idx_t const nmodes = csf->nmodes; idx_t const nfactors = model->rank; csf_sparsity const * const pt = csf->pt + tile; /* fid is the row we are actually updating */ idx_t const fid = (pt->fids[0] == NULL) ? i : pt->fids[0][i]; #ifdef SPLATT_USE_MPI assert(fid < model->globmats[csf->dim_perm[0]]->I); val_t * const restrict out_row = model->globmats[csf->dim_perm[0]]->vals + (fid * nfactors); #else val_t * const restrict out_row = model->factors[csf->dim_perm[0]] + (fid * nfactors); #endif val_t * const restrict accum = ws->thds[tid].scratch[1]; val_t * const restrict neqs = ws->thds[tid].scratch[2]; idx_t bufsize = 0; /* how many hada vecs are in mat_accum */ idx_t nflush = 0; /* how many times we have flushed to add to the neqs */ val_t * const restrict mat_accum = ws->thds[tid].scratch[3]; val_t * hada = mat_accum; val_t * const restrict hada_accum = ws->thds[tid].scratch[4]; /* clear out buffers */ for(idx_t m=0; m < nmodes; ++m) { for(idx_t f=0; f < nfactors; ++f) { accum[f + (m*nfactors)] = 0.; } for(idx_t f=0; f < nfactors; ++f) { hada_accum[f + (m*nfactors)] = 0.; } } for(idx_t f=0; f < nfactors; ++f) { out_row[f] = 0; } /* grab factors */ val_t * mats[MAX_NMODES]; for(idx_t m=0; m < nmodes; ++m) { mats[m] = model->factors[csf->dim_perm[m]]; } /* do MTTKRP + dsyrk */ p_process_slice(csf, 0, i, mats, nfactors, out_row, accum, neqs, mat_accum, hada_accum, &nflush, mttkrp_time); struct timeval start, stop; gettimeofday(&start, NULL); /* add regularization to the diagonal */ for(idx_t f=0; f < nfactors; ++f) { neqs[f + (f * nfactors)] += reg; } /* solve! */ p_invert_row(neqs, out_row, nfactors); gettimeofday(&stop, NULL); *solving_time += (stop.tv_sec + stop.tv_usec/1000000.0) - (start.tv_sec + start.tv_usec/1000000.0); } /** * @brief Update factor[m] which follows a dense mode. This function should be * called from inside an OpenMP parallel region! * * @param csf The CSF tensor array. csf[m] is a tiled tensor. * @param m The mode we are updating. * @param model The current model. * @param ws Workspace info. * @param thd_densefactors Thread structures for the dense mode. * @param tid Thread ID. */ static void p_densemode_als_update( splatt_csf const * const csf, idx_t const m, tc_model * const model, tc_ws * const ws, thd_info * const thd_densefactors, int const tid, double *solving_time, double *mttkrp_time) { struct timeval start, stop; idx_t const rank = model->rank; /* master thread writes/aggregates directly to the model */ #pragma omp master #ifdef SPLATT_USE_MPI SPLATT_VPTR_SWAP(thd_densefactors[0].scratch[0], model->globmats[m]->vals); idx_t const dense_slices = model->globmats[m]->I; #else SPLATT_VPTR_SWAP(thd_densefactors[0].scratch[0], model->factors[m]); idx_t const dense_slices = model->dims[m]; #endif /* TODO: this could be better by instead only initializing neqs with beta=0 * and keeping track of which have been updated. 
*/ memset(thd_densefactors[tid].scratch[0], 0, dense_slices * rank * sizeof(val_t)); memset(thd_densefactors[tid].scratch[1], 0, dense_slices * rank * rank * sizeof(val_t)); #pragma omp barrier /* update each tile in parallel */ #pragma omp for schedule(dynamic, 1) for(idx_t tile=0; tile < csf[m].ntiles; ++tile) { p_process_tile(csf+m, tile, model, ws, thd_densefactors, tid, mttkrp_time); } /* aggregate partial products */ thd_reduce(thd_densefactors, 0, dense_slices * rank, REDUCE_SUM); /* TODO: this could be better by using a custom reduction which only * operates on the upper triangular portion. OpenMP 4 declare reduction * would be good here? */ thd_reduce(thd_densefactors, 1, dense_slices * rank * rank, REDUCE_SUM); /* save result to model */ #pragma omp master #ifdef SPLATT_USE_MPI SPLATT_VPTR_SWAP(thd_densefactors[0].scratch[0], model->globmats[m]->vals); #else SPLATT_VPTR_SWAP(thd_densefactors[0].scratch[0], model->factors[m]); #endif #pragma omp barrier /* do all of the Cholesky factorizations */ #ifdef SPLATT_USE_MPI val_t * const restrict out = model->globmats[m]->vals; #else val_t * const restrict out = model->factors[m]; #endif val_t const reg = ws->regularization[m]; #pragma omp for schedule(static, 1) for(idx_t i=0; i < dense_slices; ++i) { gettimeofday(&start, NULL); val_t * const restrict neqs_i = (val_t *) thd_densefactors[0].scratch[1] + (i * rank * rank); /* add regularization */ for(idx_t f=0; f < rank; ++f) { neqs_i[f + (f * rank)] += reg; } /* Cholesky + solve */ p_invert_row(neqs_i, out + (i * rank), rank); gettimeofday(&stop, NULL); *solving_time += (stop.tv_sec + stop.tv_usec/1000000.0) - (start.tv_sec + start.tv_usec/1000000.0); } } static void getLvrgScore(val_t * A, val_t **lev_score, idx_t rank, idx_t nrows, int factor){ char jobu = 'S'; char jobvt = 'N'; int m = (int)nrows; int n = (int)rank; int lda = m; int ldu = m; int ldvt = n; double *S = (double *)malloc(n * sizeof(double)); double *U = (double *)malloc((ldu*n) * sizeof(double)); double *VT; int lwork = -1; double wkopt; double *work; int info; double *a = (double *)malloc((nrows*rank) * sizeof(double)); for(int i=0; i<nrows; i++) for(int j=0; j<rank; j++) a[j*nrows + i] = A[i*rank + j]; // Query and allocate appropriate workspace dgesvd_(&jobu, &jobvt, &m, &n, a, &lda, S, U, &ldu, VT, &ldvt, &wkopt, &lwork, &info); if(info) printf("info return %d\n",info); lwork = (int)wkopt; work = (double *)malloc(lwork*sizeof(double)); /* Compute SVD */ dgesvd_(&jobu, &jobvt, &m, &n, a, &lda, S, U, &ldu, VT, &ldvt, work, &lwork, &info); if(info) printf("info return %d\n",info); for(int i=0; i<m; i++){ val_t sum = 0.0; for(int j=0; j<n; j++) sum += U[i + j*ldu] * U[i + j*ldu]; sum = sqrt((double)sum); lev_score[factor][i] = sum; } } #ifdef SPLATT_USE_MPI static void p_update_factor_all2all( tc_model * const model, tc_ws * const ws, idx_t const mode) { rank_info * const rinfo = ws->rinfo; idx_t const m = mode; idx_t const nfactors = model->rank; idx_t const nglobrows = model->globmats[m]->I; val_t const * const restrict gmatv = model->globmats[m]->vals; /* ensure local info is up to date */ assert(rinfo->ownstart[m] + rinfo->nowned[m] <= model->dims[m]); val_t * const restrict matv = model->factors[m]; par_memcpy(matv + (rinfo->ownstart[m] * nfactors), gmatv, rinfo->nowned[m] * nfactors * sizeof(*matv)); if(rinfo->layer_size[mode] == 1) { return; } /* first prepare all values that I own and need to send */ idx_t const mat_start = rinfo->mat_start[m]; idx_t const * const restrict nbr2globs_inds = 
rinfo->nbr2globs_inds[m]; idx_t const * const restrict local2nbr_inds = rinfo->local2nbr_inds[m]; idx_t const nsends = rinfo->nnbr2globs[m]; idx_t const nrecvs = rinfo->nlocal2nbr[m]; val_t * const restrict nbr2globs_buf = ws->nbr2globs_buf; val_t * const restrict nbr2local_buf = ws->local2nbr_buf; /* fill send buffer */ #pragma omp for for(idx_t s=0; s < nsends; ++s) { assert(nbr2globs_inds[s] >= mat_start); idx_t const row = nbr2globs_inds[s] - mat_start; val_t * const restrict buf_row = nbr2globs_buf + (s * nfactors); val_t const * const restrict gmat_row = gmatv + (row * nfactors); for(idx_t f=0; f < nfactors; ++f) { buf_row[f] = gmat_row[f]; } } /* exchange entries */ #pragma omp master { /* grab ptr/disp from rinfo. nbr2local and local2nbr will have the same * structure so we just reuse those */ int const * const restrict nbr2globs_ptr = rinfo->nbr2globs_ptr[m]; int const * const restrict nbr2local_ptr = rinfo->local2nbr_ptr[m]; int const * const restrict nbr2globs_disp = rinfo->nbr2globs_disp[m]; int const * const restrict nbr2local_disp = rinfo->local2nbr_disp[m]; timer_start(&timers[TIMER_MPI_COMM]); MPI_Alltoallv(nbr2globs_buf, nbr2globs_ptr, nbr2globs_disp, SPLATT_MPI_VAL, nbr2local_buf, nbr2local_ptr, nbr2local_disp, SPLATT_MPI_VAL, rinfo->layer_comm[m]); timer_stop(&timers[TIMER_MPI_COMM]); } #pragma omp barrier /* now write incoming values to my local matrix */ #pragma omp for for(idx_t r=0; r < nrecvs; ++r) { idx_t const row = local2nbr_inds[r]; assert(row < rinfo->ownstart[m] || row >= rinfo->ownend[m]); val_t * const restrict mat_row = matv + (row * nfactors); val_t const * const restrict buf_row = nbr2local_buf + (r * nfactors); for(idx_t f=0; f < nfactors; ++f) { mat_row[f] = buf_row[f]; } } } static void p_init_mpi( sptensor_t const * const train, tc_model * const model, tc_ws * const ws) { idx_t maxlocal2nbr = 0; idx_t maxnbr2globs = 0; for(idx_t m=0; m < train->nmodes; ++m) { maxlocal2nbr = SS_MAX(maxlocal2nbr, ws->rinfo->nlocal2nbr[m]); maxnbr2globs = SS_MAX(maxnbr2globs, ws->rinfo->nnbr2globs[m]); } ws->local2nbr_buf = splatt_malloc(model->rank*maxlocal2nbr * sizeof(val_t)); ws->nbr2globs_buf = splatt_malloc(model->rank*maxnbr2globs * sizeof(val_t)); /* get initial factors */ for(idx_t m=0; m < train->nmodes; ++m) { p_update_factor_all2all(model, ws, m); } timer_reset(&timers[TIMER_MPI_COMM]); } #endif /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ void splatt_tc_als( sptensor_t * train, sptensor_t * const validate, tc_model * const model, tc_ws * const ws) { idx_t const nmodes = train->nmodes; idx_t const nfactors = model->rank; #ifdef SPLATT_USE_MPI rank_info * rinfo = ws->rinfo; int const rank = rinfo->rank; #else int const rank = 0; #endif // if(rank == 0) { // printf("BUFSIZE=%d\n", ALS_BUFSIZE); // printf("USE_3MODE_OPT=%d\n", USE_3MODE_OPT); // } /* store dense modes redundantly among threads */ thd_info * thd_densefactors = NULL; if(ws->num_dense > 0) { thd_densefactors = thd_init(ws->nthreads, 3, ws->maxdense_dim * nfactors * sizeof(val_t), /* accum */ ws->maxdense_dim * nfactors * nfactors * sizeof(val_t), /* neqs */ ws->maxdense_dim * sizeof(int)); /* nflush */ // if(rank == 0) { // printf("REPLICATING MODES:"); // for(idx_t m=0; m < nmodes; ++m) { // if(ws->isdense[m]) { // printf(" %"SPLATT_PF_IDX, m+1); // } // } // printf("\n\n"); // } } /* load-balanced partition each mode for threads */ idx_t * parts[MAX_NMODES]; 
splatt_csf csf[MAX_NMODES]; /* convert training data to CSF-ALLMODE */ double * opts = splatt_default_opts(); opts[SPLATT_OPTION_NTHREADS] = ws->nthreads; opts[SPLATT_OPTION_CSF_ALLOC] = SPLATT_CSF_ALLMODE; #ifdef SPLATT_USE_MPI sptensor_t * both = NULL; if(validate != NULL) { both = tt_union(train, validate); } for(idx_t m=0; m < nmodes; ++m) { /* setup communication structures */ mpi_find_owned(train, m, rinfo); if(validate != NULL) { mpi_compute_ineed(rinfo, both, m, nfactors, 1); } else { mpi_compute_ineed(rinfo, train, m, nfactors, 1); } } if(validate != NULL) { tt_free(both); } #endif for(idx_t m=0; m < nmodes; ++m) { #ifdef SPLATT_USE_MPI /* tt has more nonzeros than any of the modes actually need, so we need * to filter them first. */ sptensor_t * tt_filtered = mpi_filter_tt_1d(train, m, rinfo->mat_start[m], rinfo->mat_end[m]); assert(tt_filtered->dims[m] == rinfo->mat_end[m] - rinfo->mat_start[m]); assert(train->indmap[m] == NULL); assert(tt_filtered->indmap[m] == NULL); #endif if(ws->isdense[m]) { /* standard CSF allocation for sparse modes */ opts[SPLATT_OPTION_TILE] = SPLATT_DENSETILE; opts[SPLATT_OPTION_TILEDEPTH] = 1; /* don't tile dense mode */ #ifdef SPLATT_USE_MPI csf_alloc_mode(tt_filtered, CSF_SORTED_MINUSONE, m, csf+m, opts); #else csf_alloc_mode(train, CSF_SORTED_MINUSONE, m, csf+m, opts); #endif parts[m] = NULL; } else { /* standard CSF allocation for sparse modes */ opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE; #ifdef SPLATT_USE_MPI csf_alloc_mode(tt_filtered, CSF_SORTED_MINUSONE, m, csf+m, opts); #else csf_alloc_mode(train, CSF_SORTED_MINUSONE, m, csf+m, opts); #endif parts[m] = csf_partition_1d(csf+m, 0, ws->nthreads); } #ifdef SPLATT_USE_MPI tt_free(tt_filtered); #ifdef SPLATT_DEBUG /* sanity check on nnz */ idx_t totnnz; MPI_Allreduce(&(csf[m].nnz), &totnnz, 1, SPLATT_MPI_IDX, MPI_SUM, rinfo->comm_3d); assert(totnnz == rinfo->global_nnz); #endif #endif } #ifdef SPLATT_USE_MPI p_init_mpi(train, model, ws); /* TERRIBLE HACK for loss computation */ sptensor_t * train_back = train; sptensor_t * tt_filter = mpi_filter_tt_1d(train, 0, rinfo->mat_start[0], rinfo->mat_end[0]); #pragma omp parallel for for(idx_t n=0; n < tt_filter->nnz; ++n) { tt_filter->ind[0][n] += rinfo->mat_start[0]; } train = tt_filter; #endif // if(rank == 0) { // printf("\n"); // } val_t loss = tc_loss_sq(train, model, ws); val_t frobsq = tc_frob_sq(model, ws); tc_converge(train, validate, model, loss, frobsq, 0, ws); double avg_solving_time[3] = {0.0, 0.0, 0.0}; double avg_mttkrp_time[3] = {0.0, 0.0, 0.0}; double avg_tot_time[3] = {0.0, 0.0, 0.0}; int count=0; FILE *f_lev = fopen("Leverage_als.csv", "w"); sp_timer_t mode_timer; timer_reset(&mode_timer); timer_start(&ws->tc_time); val_t **lev_score = (val_t **)malloc(nmodes * sizeof(val_t *)); for(int i=0; i<nmodes; i++) lev_score[i] = (val_t *)malloc((model->dims[i])*sizeof(val_t)); for(idx_t e=1; e < ws->max_its+1; ++e) { count++; for(int i=0; i<nmodes; i++) getLvrgScore(model->factors[i], lev_score, model->rank, model->dims[i], i); #pragma omp parallel { int const tid = splatt_omp_get_thread_num(); for(idx_t m=0; m < nmodes; ++m) { double solving_time = 0.0; double mttkrp_time = 0.0; #pragma omp master timer_fstart(&mode_timer); if(ws->isdense[m]) { p_densemode_als_update(csf, m, model, ws, thd_densefactors, tid, &solving_time, &mttkrp_time); /* dense modes are easy */ } else { /* update each row in parallel */ for(idx_t i=parts[m][tid]; i < parts[m][tid+1]; ++i) { p_update_slice(csf+m, 0, i, ws->regularization[m], model, ws, tid, &solving_time, 
&mttkrp_time); } } #pragma omp barrier #ifdef SPLATT_USE_MPI p_update_factor_all2all(model, ws, m); #endif #pragma omp barrier #pragma omp master { timer_stop(&mode_timer); if(rank == 0) { for(int i=0; i<model->dims[m]; i++) fprintf(f_lev, "%lf,", lev_score[m][i]); fprintf(f_lev, "\n"); avg_tot_time[m] += (double)mode_timer.seconds; avg_mttkrp_time[m] += mttkrp_time; avg_solving_time[m] += solving_time; // printf(" mode: %"SPLATT_PF_IDX" time: %0.3fs\n", m+1, // mode_timer.seconds); // printf("Solving Time: %lf\n",solving_time); // printf("MTTKRP Time: %lf\n", mttkrp_time); // printf("\n"); } } #pragma omp barrier } /* foreach mode */ } /* end omp parallel */ /* compute new obj value, print stats, and exit if converged */ val_t loss = tc_loss_sq(train, model, ws); val_t frobsq = tc_frob_sq(model, ws); if(tc_converge(train, validate, model, loss, frobsq, e, ws)) { break; } } /* foreach iteration */ printf("\n"); for(int i=0; i<nmodes; i++){ printf("MODE: %d\n-----------\n", i); printf(" Total Time: %lf\n", (avg_tot_time[i]/count)); printf(" MTTKRP Time: %lf\n", (avg_mttkrp_time[i]/count)); printf(" Solving Time: %lf\n",(avg_solving_time[i]/count)); printf("\n"); } #ifdef SPLATT_USE_MPI /* UNDO TERRIBLE HACK */ tt_free(train); train = train_back; #endif /* cleanup */ for(idx_t m=0; m < nmodes; ++m) { csf_free_mode(csf+m); splatt_free(parts[m]); } if(ws->maxdense_dim > 0) { thd_free(thd_densefactors, ws->nthreads); } }
dsDoubleVector.c
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <math.h>

#ifdef USE_OPENMP
#include <omp.h>
#endif

#include "ds.h"

char dtoa(int d) {
  if (d == 0) { return '0'; }
  else if (d == 1) { return '1'; }
  else if (d == 2) { return '2'; }
  else if (d == 3) { return '3'; }
  else if (d == 4) { return '4'; }
  else if (d == 5) { return '5'; }
  else if (d == 6) { return '6'; }
  else if (d == 7) { return '7'; }
  else if (d == 8) { return '8'; }
  else if (d == 9) { return '9'; }
  else { return 'x'; }
}

char* utoa(size_t s) {
  size_t d = 0;
  size_t temp = s;
  while (temp > 0) {
    temp = temp / 10;
    d = d + 1;
  }
  char* str = malloc((d + 1) * sizeof(char));
  temp = s;
  int i = d;
  while (temp > 0) {
    int n = temp % 10;
    char c = dtoa(n);
    str[i - 1] = c;
    temp = temp / 10;
    i = i - 1;
  }
  str[d] = '\0';
  return str;
}

DoubleVector* ds_double_vector_new(size_t s) {
  DoubleVector* vector = malloc(sizeof(DoubleVector));
  vector->size = s;
  vector->vec = malloc(vector->size * sizeof(double));
  for (int i = 0; i < vector->size; i++) {
    vector->vec[i] = 0.0;
  }
  return vector;
}

size_t ds_double_vector_size(DoubleVector* v) {
  return v->size;
}

double ds_double_vector_get(DoubleVector* v, size_t index) {
  if (index > v->size - 1) {
    ds_double_vector_error(v, "Index out of range\n");
  }
  return v->vec[index];
}

void ds_double_vector_set(DoubleVector* v, size_t index, double data) {
  if (index > v->size - 1) {
    ds_double_vector_error(v, "Index out of range\n");
  }
  v->vec[index] = data;
}

double ds_double_vector_magnitude(DoubleVector* v) {
  size_t len = v->size;
  double temp = 0.0;
  for (int i = 0; i < len; i++) {
    temp += pow(v->vec[i], 2);
  }
  return sqrt(temp);
}

int ds_double_vector_equal(DoubleVector* v1, DoubleVector* v2) {
  if (!(v1->size == v2->size)) {
    return 0;
  }
  size_t len = v1->size;
  for (int i = 0; i < len; i++) {
    if (!(v1->vec[i] == v2->vec[i])) {
      return 0;
    }
  }
  return 1;
}

DoubleVector* ds_double_vector_add(DoubleVector* v1, DoubleVector* v2) {
  size_t len1 = v1->size;
  size_t len2 = v2->size;
  if (len1 != len2) {
    return NULL;
  }
  DoubleVector* v = ds_double_vector_new(len1);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len1; i++) {
    ds_double_vector_set(v, i, ds_double_vector_get(v1, i) + ds_double_vector_get(v2, i));
  }
  return v;
}

DoubleVector* ds_double_vector_scalar_add(DoubleVector* v1, double s) {
  size_t len = v1->size;
  DoubleVector* v = ds_double_vector_new(len);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len; i++) {
    ds_double_vector_set(v, i, ds_double_vector_get(v1, i) + s);
  }
  return v;
}

DoubleVector* ds_double_vector_sub(DoubleVector* v1, DoubleVector* v2) {
  size_t len1 = v1->size;
  size_t len2 = v2->size;
  if (len1 != len2) {
    return NULL;
  }
  DoubleVector* v = ds_double_vector_new(len1);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len1; i++) {
    ds_double_vector_set(v, i, ds_double_vector_get(v1, i) - ds_double_vector_get(v2, i));
  }
  return v;
}

DoubleVector* ds_double_vector_scalar_sub_first(double s, DoubleVector* v1) {
  size_t len = v1->size;
  DoubleVector* v = ds_double_vector_new(len);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len; i++) {
    ds_double_vector_set(v, i, s - ds_double_vector_get(v1, i));
  }
  return v;
}

DoubleVector* ds_double_vector_scalar_sub_second(DoubleVector* v1, double s) {
  size_t len = v1->size;
  DoubleVector* v = ds_double_vector_new(len);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len; i++) {
    ds_double_vector_set(v, i, ds_double_vector_get(v1, i) - s);
  }
  return v;
}

DoubleVector* ds_double_vector_mul(DoubleVector* v1, DoubleVector* v2) {
  size_t len1 = v1->size;
  size_t len2 = v2->size;
  if (len1 != len2) {
    return NULL;
  }
  DoubleVector* v = ds_double_vector_new(len1);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len1; i++) {
    ds_double_vector_set(v, i, ds_double_vector_get(v1, i) * ds_double_vector_get(v2, i));
  }
  return v;
}

DoubleVector* ds_double_vector_scalar_mul(DoubleVector* v1, double s) {
  size_t len = v1->size;
  DoubleVector* v = ds_double_vector_new(len);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len; i++) {
    ds_double_vector_set(v, i, ds_double_vector_get(v1, i) * s);
  }
  return v;
}

DoubleVector* ds_double_vector_div(DoubleVector* v1, DoubleVector* v2) {
  size_t len1 = v1->size;
  size_t len2 = v2->size;
  if (len1 != len2) {
    return NULL;
  }
  DoubleVector* v = ds_double_vector_new(len1);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len1; i++) {
    ds_double_vector_set(v, i, ds_double_vector_get(v1, i) / ds_double_vector_get(v2, i));
  }
  return v;
}

DoubleVector* ds_double_vector_scalar_div_first(double s, DoubleVector* v1) {
  size_t len = v1->size;
  DoubleVector* v = ds_double_vector_new(len);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len; i++) {
    ds_double_vector_set(v, i, s / ds_double_vector_get(v1, i));
  }
  return v;
}

DoubleVector* ds_double_vector_scalar_div_second(DoubleVector* v1, double s) {
  size_t len = v1->size;
  DoubleVector* v = ds_double_vector_new(len);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len; i++) {
    ds_double_vector_set(v, i, ds_double_vector_get(v1, i) / s);
  }
  return v;
}

DoubleVector* ds_double_vector_pow(DoubleVector* v1, DoubleVector* v2) {
  size_t len1 = v1->size;
  size_t len2 = v2->size;
  if (len1 != len2) {
    return NULL;
  }
  DoubleVector* v = ds_double_vector_new(len1);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len1; i++) {
    ds_double_vector_set(v, i, pow(ds_double_vector_get(v1, i), ds_double_vector_get(v2, i)));
  }
  return v;
}

DoubleVector* ds_double_vector_scalar_pow_first(double s, DoubleVector* v1) {
  size_t len = v1->size;
  DoubleVector* v = ds_double_vector_new(len);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len; i++) {
    ds_double_vector_set(v, i, pow(s, ds_double_vector_get(v1, i)));
  }
  return v;
}

DoubleVector* ds_double_vector_scalar_pow_second(DoubleVector* v1, double s) {
  size_t len = v1->size;
  DoubleVector* v = ds_double_vector_new(len);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < len; i++) {
    ds_double_vector_set(v, i, pow(ds_double_vector_get(v1, i), s));
  }
  return v;
}

double ds_double_vector_dot(DoubleVector* v1, DoubleVector* v2) {
  size_t len1 = v1->size;
  size_t len2 = v2->size;
  if (len1 != len2) {
    fprintf(stderr, "Unequal vector size, invalid result\n");
    return 0.0;
  }
  double sum = 0.0;
  for (int i = 0; i < len1; i++) {
    sum += v1->vec[i] * v2->vec[i];
  }
  return sum;
}

void ds_double_vector_error(DoubleVector* v, const char* msg) {
  fprintf(stderr, "%s", msg);
  ds_double_vector_free(v);
}

void ds_double_vector_free(DoubleVector* v) {
  free((void*) ((DoubleVector*) v)->vec);
  free((void*) v);
}
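A short usage sketch of the API above, assuming ds.h declares DoubleVector (with the size and vec fields used here) and the ds_double_vector_* prototypes:

#include <stdio.h>
#include "ds.h"

int main(void) {
  DoubleVector* a = ds_double_vector_new(3);
  DoubleVector* b = ds_double_vector_new(3);
  for (int i = 0; i < 3; i++) {
    ds_double_vector_set(a, i, (double)(i + 1)); /* a = [1, 2, 3] */
    ds_double_vector_set(b, i, 2.0);             /* b = [2, 2, 2] */
  }

  DoubleVector* sum = ds_double_vector_add(a, b); /* elementwise: [3, 4, 5] */
  printf("sum[2] = %f\n", ds_double_vector_get(sum, 2)); /* 5.0 */
  printf("dot    = %f\n", ds_double_vector_dot(a, b));   /* 12.0 */

  ds_double_vector_free(sum);
  ds_double_vector_free(a);
  ds_double_vector_free(b);
  return 0;
}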
bfs_simple.c
/* Copyright (C) 2010-2011 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #include "common.h" #include "oned_csr.h" #include <mpi.h> #include <stdint.h> #include <inttypes.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <limits.h> #include <assert.h> static oned_csr_graph g; static int64_t* g_oldq; static int64_t* g_newq; static unsigned long* g_visited; static const int coalescing_size = 256; static int64_t* g_outgoing; static size_t* g_outgoing_counts /* 2x actual count */; static MPI_Request* g_outgoing_reqs; static int* g_outgoing_reqs_active; static int64_t* g_recvbuf; void make_graph_data_structure(const tuple_graph* const tg) { //makes the tuple graph into a one directional compressed sparce row graph convert_graph_to_oned_csr(tg, &g); const size_t nlocalverts = g.nlocalverts; g_oldq = (int64_t*)xmalloc(nlocalverts * sizeof(int64_t)); g_newq = (int64_t*)xmalloc(nlocalverts * sizeof(int64_t)); const int ulong_bits = sizeof(unsigned long) * CHAR_BIT; int64_t visited_size = (nlocalverts + ulong_bits - 1) / ulong_bits; g_visited = (unsigned long*)xmalloc(visited_size * sizeof(unsigned long)); g_outgoing = (int64_t*)xMPI_Alloc_mem(coalescing_size * size * 2 * sizeof(int64_t)); g_outgoing_counts = (size_t*)xmalloc(size * sizeof(size_t)) /* 2x actual count */; g_outgoing_reqs = (MPI_Request*)xmalloc(size * sizeof(MPI_Request)); g_outgoing_reqs_active = (int*)xmalloc(size * sizeof(int)); g_recvbuf = (int64_t*)xMPI_Alloc_mem(coalescing_size * 2 * sizeof(int64_t)); } void free_graph_data_structure(void) { free(g_oldq); free(g_newq); free(g_visited); MPI_Free_mem(g_outgoing); free(g_outgoing_counts); free(g_outgoing_reqs); free(g_outgoing_reqs_active); MPI_Free_mem(g_recvbuf); free_oned_csr_graph(&g); } int bfs_writes_depth_map(void) { return 0; } /* This version is the traditional level-synchronized BFS using two queues. A * bitmap is used to indicate which vertices have been visited. Messages are * sent and processed asynchronously throughout the code to hopefully overlap * communication with computation. */ void run_bfs(int64_t root, int64_t* pred) { const size_t nlocalverts = g.nlocalverts; /* Set up the queues. */ int64_t* restrict oldq = g_oldq; int64_t* restrict newq = g_newq; size_t oldq_count = 0; size_t newq_count = 0; /* Set up the visited bitmap. */ const int ulong_bits = sizeof(unsigned long) * CHAR_BIT; int64_t visited_size = (nlocalverts + ulong_bits - 1) / ulong_bits; unsigned long* restrict visited = g_visited; memset(visited, 0, visited_size * sizeof(unsigned long)); #define SET_VISITED(v) do {visited[VERTEX_LOCAL((v)) / ulong_bits] |= (1UL << (VERTEX_LOCAL((v)) % ulong_bits));} while (0) #define TEST_VISITED(v) ((visited[VERTEX_LOCAL((v)) / ulong_bits] & (1UL << (VERTEX_LOCAL((v)) % ulong_bits))) != 0) /* Set up buffers for message coalescing, MPI requests, etc. for * communication. 
*/ const int coalescing_size = 256; int64_t* restrict outgoing = g_outgoing; size_t* restrict outgoing_counts = g_outgoing_counts; MPI_Request* restrict outgoing_reqs = g_outgoing_reqs; int* restrict outgoing_reqs_active = g_outgoing_reqs_active; memset(outgoing_reqs_active, 0, size * sizeof(int)); int64_t* restrict recvbuf = g_recvbuf; MPI_Request recvreq; int recvreq_active = 0; /* Termination counter for each level: this variable counts the number of * ranks that have said that they are done sending to me in the current * level. This rank can stop listening for new messages when it reaches * size. */ int num_ranks_done; /* Set all vertices to "not visited." */ {size_t i; for (i = 0; i < nlocalverts; ++i) pred[i] = -1;} /* Mark the root and put it into the queue. */ if (VERTEX_OWNER(root) == rank) { SET_VISITED(root); pred[VERTEX_LOCAL(root)] = root; oldq[oldq_count++] = root; } #define CHECK_MPI_REQS \ /* Check all MPI requests and handle any that have completed. */ \ do { \ /* Test for incoming vertices to put onto the queue. */ \ while (recvreq_active) { \ int flag; \ MPI_Status st; \ MPI_Test(&recvreq, &flag, &st); \ if (flag) { \ recvreq_active = 0; \ int count; \ MPI_Get_count(&st, MPI_INT64_T, &count); \ /* count == 0 is a signal from a rank that it is done sending to me * (using MPI's non-overtaking rules to keep that signal after all * "real" messages. */ \ if (count == 0) { \ ++num_ranks_done; \ } else { \ int j; \ for (j = 0; j < count; j += 2) { \ int64_t tgt = recvbuf[j]; \ int64_t src = recvbuf[j + 1]; \ /* Process one incoming edge. */ \ assert (VERTEX_OWNER(tgt) == rank); \ if (!TEST_VISITED(tgt)) { \ SET_VISITED(tgt); \ pred[VERTEX_LOCAL(tgt)] = src; \ newq[newq_count++] = tgt; \ } \ } \ } \ /* Restart the receive if more messages will be coming. */ \ if (num_ranks_done < size) { \ MPI_Irecv(recvbuf, coalescing_size * 2, MPI_INT64_T, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &recvreq); \ recvreq_active = 1; \ } \ } else break; \ } \ /* Mark any sends that completed as inactive so their buffers can be * reused. */ \ int c; \ for (c = 0; c < size; ++c) { \ if (outgoing_reqs_active[c]) { \ int flag; \ MPI_Test(&outgoing_reqs[c], &flag, MPI_STATUS_IGNORE); \ if (flag) outgoing_reqs_active[c] = 0; \ } \ } \ } while (0) while (1) { memset(outgoing_counts, 0, size * sizeof(size_t)); num_ranks_done = 1; /* I never send to myself, so I'm always done */ /* Start the initial receive. */ if (num_ranks_done < size) { MPI_Irecv(recvbuf, coalescing_size * 2, MPI_INT64_T, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &recvreq); recvreq_active = 1; } /* Step through the current level's queue. */ size_t i; for (i = 0; i < oldq_count; ++i) { CHECK_MPI_REQS; assert (VERTEX_OWNER(oldq[i]) == rank); assert (pred[VERTEX_LOCAL(oldq[i])] >= 0 && pred[VERTEX_LOCAL(oldq[i])] < g.nglobalverts); int64_t src = oldq[i]; /* Iterate through its incident edges. */ size_t j, j_end = g.rowstarts[VERTEX_LOCAL(oldq[i]) + 1]; //so iterate through all elements in the row j, stop when you get to //the next rowstart for (j = g.rowstarts[VERTEX_LOCAL(oldq[i])]; j < j_end; ++j) { //we have the row that j is in and this will give us the column aka position int64_t tgt = g.column[j]; int owner = VERTEX_OWNER(tgt); /* If the other endpoint is mine, update the visited map, predecessor * map, and next-level queue locally; otherwise, send the target and * the current vertex (its possible predecessor) to the target's owner. 
* */ if (owner == rank) { if (!TEST_VISITED(tgt)) { SET_VISITED(tgt); pred[VERTEX_LOCAL(tgt)] = src; newq[newq_count++] = tgt; } } else { while (outgoing_reqs_active[owner]) CHECK_MPI_REQS; /* Wait for buffer to be available */ size_t c = outgoing_counts[owner]; outgoing[owner * coalescing_size * 2 + c] = tgt; outgoing[owner * coalescing_size * 2 + c + 1] = src; outgoing_counts[owner] += 2; if (outgoing_counts[owner] == coalescing_size * 2) { MPI_Isend(&outgoing[owner * coalescing_size * 2], coalescing_size * 2, MPI_INT64_T, owner, 0, MPI_COMM_WORLD, &outgoing_reqs[owner]); outgoing_reqs_active[owner] = 1; outgoing_counts[owner] = 0; } } } } /* Flush any coalescing buffers that still have messages. */ int offset; for (offset = 1; offset < size; ++offset) { int dest = MOD_SIZE(rank + offset); if (outgoing_counts[dest] != 0) { while (outgoing_reqs_active[dest]) CHECK_MPI_REQS; MPI_Isend(&outgoing[dest * coalescing_size * 2], outgoing_counts[dest], MPI_INT64_T, dest, 0, MPI_COMM_WORLD, &outgoing_reqs[dest]); outgoing_reqs_active[dest] = 1; outgoing_counts[dest] = 0; } /* Wait until all sends to this destination are done. */ while (outgoing_reqs_active[dest]) CHECK_MPI_REQS; /* Tell the destination that we are done sending to them. */ MPI_Isend(&outgoing[dest * coalescing_size * 2], 0, MPI_INT64_T, dest, 0, MPI_COMM_WORLD, &outgoing_reqs[dest]); /* Signal no more sends */ outgoing_reqs_active[dest] = 1; while (outgoing_reqs_active[dest]) CHECK_MPI_REQS; } /* Wait until everyone else is done (and thus couldn't send us any more * messages). */ while (num_ranks_done < size) CHECK_MPI_REQS; /* Test globally if all queues are empty. */ int64_t global_newq_count; MPI_Allreduce(&newq_count, &global_newq_count, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD); /* Quit if they all are empty. */ if (global_newq_count == 0) break; /* Swap old and new queues; clear new queue for next level. */ {int64_t* temp = oldq; oldq = newq; newq = temp;} oldq_count = newq_count; newq_count = 0; } #undef CHECK_MPI_REQS } void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) { const int64_t* restrict vertex = vertex_p; int* restrict owner = owner_p; size_t* restrict local = local_p; ptrdiff_t i; #pragma omp parallel for for (i = 0; i < (ptrdiff_t)count; ++i) { owner[i] = VERTEX_OWNER(vertex[i]); local[i] = VERTEX_LOCAL(vertex[i]); } } int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) { return VERTEX_TO_GLOBAL(v_rank, v_local); } size_t get_nlocalverts_for_pred(void) { return g.nlocalverts; }
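The SET_VISITED/TEST_VISITED macros above reduce to a small bit-packed bitmap API. Below is a minimal, self-contained sketch of the same idea, with VERTEX_LOCAL() replaced by a plain local index; the helper names are illustrative and are not part of the Graph500 sources.

#include <limits.h>
#include <stdlib.h>

#define ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)

/* Allocate a zeroed bitmap large enough to hold nbits bits. */
static unsigned long* bitmap_alloc(size_t nbits) {
  size_t nwords = (nbits + ULONG_BITS - 1) / ULONG_BITS;  /* round up */
  return (unsigned long*)calloc(nwords, sizeof(unsigned long));
}

/* Set bit i, mirroring SET_VISITED. */
static void bitmap_set(unsigned long* bm, size_t i) {
  bm[i / ULONG_BITS] |= 1UL << (i % ULONG_BITS);
}

/* Test bit i, mirroring TEST_VISITED. */
static int bitmap_test(const unsigned long* bm, size_t i) {
  return (bm[i / ULONG_BITS] & (1UL << (i % ULONG_BITS))) != 0;
}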
GB_memoryUsage.c
//------------------------------------------------------------------------------ // GB_memoryUsage: # of bytes used for a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GB_memoryUsage // count # allocated blocks and their sizes ( int64_t *nallocs, // # of allocated memory blocks size_t *mem_deep, // # of bytes in blocks owned by this matrix size_t *mem_shallow, // # of bytes in blocks owned by another matrix const GrB_Matrix A // matrix to query ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (nallocs != NULL) ; ASSERT (mem_deep != NULL) ; ASSERT (mem_shallow != NULL) ; //-------------------------------------------------------------------------- // count the allocated blocks and their sizes //-------------------------------------------------------------------------- // a matrix contains 0 to 10 dynamically malloc'd blocks (*nallocs) = 0 ; (*mem_deep) = 0 ; (*mem_shallow) = 0 ; if (A == NULL) { #pragma omp flush return (GrB_SUCCESS) ; } GB_Pending Pending = A->Pending ; if (!A->static_header) { (*nallocs)++ ; (*mem_deep) += A->header_size ; } if (A->p != NULL) { if (A->p_shallow) { (*mem_shallow) += A->p_size ; } else { (*nallocs)++ ; (*mem_deep) += A->p_size ; } } if (A->h != NULL) { if (A->h_shallow) { (*mem_shallow) += A->h_size ; } else { (*nallocs)++ ; (*mem_deep) += A->h_size ; } } if (A->b != NULL) { if (A->b_shallow) { (*mem_shallow) += A->b_size ; } else { (*nallocs)++ ; (*mem_deep) += A->b_size ; } } if (A->i != NULL) { if (A->i_shallow) { (*mem_shallow) += A->i_size ; } else { (*nallocs)++ ; (*mem_deep) += A->i_size ; } } if (A->x != NULL) { if (A->x_shallow) { (*mem_shallow) += A->x_size ; } else { (*nallocs)++ ; (*mem_deep) += A->x_size ; } } if (Pending != NULL) { (*nallocs)++ ; (*mem_deep) += Pending->header_size ; } if (Pending != NULL && Pending->i != NULL) { (*nallocs)++ ; (*mem_deep) += Pending->i_size ; } if (Pending != NULL && Pending->j != NULL) { (*nallocs)++ ; (*mem_deep) += Pending->j_size ; } if (Pending != NULL && Pending->x != NULL) { (*nallocs)++ ; (*mem_deep) += Pending->x_size ; } #pragma omp flush return (GrB_SUCCESS) ; }
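The deep/shallow split above generalizes to any container whose components may alias another object's buffers: shallow bytes are reported but not counted as allocations, deep bytes are both. A hedged sketch of the same accounting over a toy struct (toy_component and toy_memory_usage are hypothetical names, not part of SuiteSparse:GraphBLAS):

#include <stddef.h>
#include <stdint.h>

typedef struct {
  void*  buf;         /* component data, possibly borrowed */
  size_t buf_size;    /* bytes allocated for buf */
  int    buf_shallow; /* nonzero: buf is owned by another object */
} toy_component;

/* Tally blocks the way GB_memoryUsage does above. */
static void toy_memory_usage(const toy_component* c, size_t n,
                             int64_t* nallocs, size_t* deep, size_t* shallow) {
  *nallocs = 0; *deep = 0; *shallow = 0;
  for (size_t k = 0; k < n; k++) {
    if (c[k].buf == NULL) continue;   /* component not present */
    if (c[k].buf_shallow) {
      *shallow += c[k].buf_size;      /* counted against the true owner */
    } else {
      (*nallocs)++;                   /* one malloc'd block owned here */
      *deep += c[k].buf_size;
    }
  }
}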
ten_tusscher_2004_RS_CPU_epi_Test.c
//Original Ten Tusscher #include <assert.h> #include <stdlib.h> #include "ten_tusscher_2004_epi_Test.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } //TODO: this should be called only once for the whole mesh, like in the GPU code SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { //inital conndition //Scenario 2 // real sv11[]={-86.7599490237245,0.00123831208622928,0.784376608695859,0.784218467628080,0.000170016808347696,0.487085364989106,0.00290043259117021,0.999998410220405,1.87270147822737e-08,1.84334654710491e-05,0.999776444937499,1.00727320017378,0.999997421410314,4.09813553215966e-05,1.00091265418338,9.36478320062292,139.974256946572}; //Scenario 3 //real sv11[]={-86.6832615134402,0.00125876883400146,0.782519885686078,0.782385890597164,0.000171886605918564,0.486287153523371,0.00291631476093424,0.999998385692801,1.89678233086951e-08,1.86229043360926e-05,0.999783587315930,1.00721445029128,0.999996850289244,4.23696052205578e-05,0.487079901995765,10.1298949658907,139.478138182002}; //Scenario 4 //real sv11[]={-86.7531659359261,0.00124010826721524,0.784213090011930,0.784063751337305,0.000170184867440439,0.487014769904825,0.00290183337641837,0.999998408105558,1.87481748650298e-08,1.84501422061852e-05,0.999773598689194,1.00768875506436,0.999999512997626,3.10350472687116e-05,1.04650592961489,10.1580626436712,139.167353745914}; //Scenario4_1_106_pop76 //real sv11[]={-86.6337556349546,0.00127215057254844,0.781315329700828,0.781192702879389,0.000173232959601247,0.485771934772721,0.00292661184320977,0.999998369627955,1.91248713554218e-08,1.87462257542883e-05,0.999765973534775,1.00688195901693,0.999991331074147,5.01588072510622e-05,0.719318246052902,9.82154696449291,139.637347751159}; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ///initial condition //Scenario 2 real sv11[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392}; //Scenario 3 //real sv11[]={-86.6902768323595,0.00125688376225555,0.782690257165761,0.782547892596001,0.000171750048746746,0.486360170563085,0.00291485827479809,0.999998387931464,1.89456679295569e-08,1.86054940017131e-05,0.999770742626069,1.00724037170339,0.999997113579370,4.17567836043613e-05,0.472458747863693,10.1478189383772,139.471917130272}; //Scenario4 //real sv11[]={-86.7596599603487,0.00123838857632763,0.784369818846026,0.784223148947282,0.000169972136689011,0.487082365294413,0.00290049182352458,0.999998410215409,1.87279005544269e-08,1.84341746908718e-05,0.999781004659642,1.00771223118124,0.999999564103621,3.04673432492567e-05,0.993358298469861,10.1763606222150,139.168522102236}; //Scenario4_1 //real sv11[]={-86.6404915792850,0.00127032163211322,0.781479753157976,0.781360816517016,0.000172969600594225,0.485842045427499,0.00292520813217015,0.999998371823369,1.91034113695031e-08,1.87293970187045e-05,0.999771221267447,1.00691525856031,0.999992103392003,4.93846276389813e-05,0.695256716079829,9.83880114557068,139.633017313049}; sv[0] = sv11[0]; // V; millivolt sv[1] = sv11[1]; //M sv[2] = sv11[2]; //H sv[3] = sv11[3]; //J sv[4] = sv11[4]; //Xr1 sv[5] = sv11[5]; //Xr2 sv[6] = 
sv11[6]; //Xs sv[7] = sv11[7]; //S sv[8] = sv11[8]; //R sv[9] = sv11[9]; //D sv[10] = sv11[10]; //F sv[11] = sv11[11]; //FCa sv[12] = sv11[12]; //G sv[13] = sv11[13]; //Cai sv[14] = sv11[14]; //CaSR sv[15] = sv11[15]; //Nai sv[16] = sv11[16]; //Ki // sv[0] = INITIAL_V; // V; millivolt // sv[1] = 0.f; //M // sv[2] = 0.75; //H // sv[3] = 0.75f; //J // sv[4] = 0.f; //Xr1 // sv[5] = 1.f; //Xr2 // sv[6] = 0.f; //Xs // sv[7] = 1.f; //S // sv[8] = 0.f; //R // sv[9] = 0.f; //D // sv[10] = 1.f; //F // sv[11] = 1.f; //FCa // sv[12] = 1.f; //G // sv[13] = 0.0002; //Cai // sv[14] = 0.2f; //CaSR // sv[15] = 11.6f; //Nai // sv[16] = 138.3f; //Ki } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; ///Scenario 2: real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05}; ///Scenario 3: //real parameters []={14.2265776064284,0.000280045021984329,0.000123702304592752,0.000251556675811958,0.224623739779267,0.145045477736859,0.132102752427711,4.42712254301024,0.0156948843567210,1.61691730440283,1100,0.000520888772463349,0.258756467150201,0.0191544497099730,0.00137164828832637,4.52996729499983e-05}; ///Scenario 4: //real parameters []={14.6970262149558,2.32527331724419e-05,0.000121747898718481,0.000276971880166082,0.210038991991875,0.120908114803453,0.200498466936257,5.12988959137240,0.0151231713364490,1.26415205898593,1083.02600285230,0.000542147164379904,0.160470068504854,0.0146070055973378,0.00183114105726186,1.00487709573505e-05}; //Scenario4_1_106_pop76 //real parameters []={14.4701107547473,0.000162061905578968,0.000188488521383406,0.000572929459830166,0.335244898151308,0.119541023695594,0.248924317567785,5.19603253018384,0.0221271053316735,2.03169412747953,1099.72574265209,0.000483122952800270,0.478907546954075,0.0199668557152203,0.00562797831559110,3.64128969863145e-05}; real GNa=parameters[0]; real GbNa=parameters[1]; real GCaL=parameters[2]; real GbCa=parameters[3]; real Gto=parameters[4]; real Gkr=parameters[5]; real Gks=parameters[6]; real GK1=parameters[7]; real GpK=parameters[8]; real knak=parameters[9]; real knaca=parameters[10]; real Vmaxup=parameters[11]; real GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real 
Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; /// real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr /// real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI /// real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL /// real Gks=0.062; ///#endif //Parameters for Ik1 /// real GK1=5.405; //Parameters for Ito ///#ifdef EPI /// real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa /// real GNa=14.838; //Parameters for IbNa /// real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; /// real knak=1.362; //Parameters for ICaL /// real GCaL=0.000175; //Parameters for IbCa /// real GbCa=0.000592; //Parameters for INaCa /// real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa /// real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; /// real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); 
IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); 
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
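The gate updates above, e.g. M_INF-(M_INF-sm)*exp(-dt/TAU_M), are the Rush-Larsen scheme for dy/dt = (y_inf - y)/tau: freezing y_inf and tau over the step makes the exponential update exact, and it is unconditionally stable for tau > 0. A minimal sketch of one step (illustrative only, not part of the model sources):

#include <math.h>

/* One Rush-Larsen step for a gating variable y with steady state y_inf
 * and time constant tau: the exact solution of dy/dt = (y_inf - y)/tau
 * over a step dt when y_inf and tau are held fixed.
 * Usage matching the code above would be, e.g.:
 *   sm = rush_larsen_step(sm, M_INF, TAU_M, dt); */
static double rush_larsen_step(double y, double y_inf, double tau, double dt) {
  return y_inf - (y_inf - y) * exp(-dt / tau);
}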
kernel_iq.c
/* ########################################################## # # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # !! !! # # !! KEEP THIS CODE CONSISTENT WITH KERNELPY.PY !! # # !! !! # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # # # ########################################################## */ // NOTE: the following macros are defined in generate.py: // // MAX_PD : the maximum number of dispersity loops allowed for this model, // which will be at most modelinfo.MAX_PD. // NUM_PARS : the number of parameters in the parameter table // NUM_VALUES : the number of values to skip at the start of the // values array before you get to the dispersity values. // PARAMETER_TABLE : list of parameter declarations used to create the // ParameterTable type. // KERNEL_NAME : model_Iq, model_Iqxy or model_Imagnetic. This code is // included three times, once for each kernel type. // MAGNETIC : defined when the magnetic kernel is being instantiated // NUM_MAGNETIC : the number of magnetic parameters // MAGNETIC_PARS : a comma-separated list of indices to the sld // parameters in the parameter table. // CALL_VOLUME(form, shell, table) : assign form and shell values // CALL_RADIUS_EFFECTIVE(mode, table) : call the R_eff function // CALL_IQ(q, table) : call the Iq function for 1D calcs. // CALL_IQ_A(q, table) : call the Iq function with |q| for 2D data. // CALL_FQ(q, F1, F2, table) : call the Fq function for 1D calcs. // CALL_FQ_A(q, F1, F2, table) : call the Iq function with |q| for 2D data. // CALL_IQ_AC(qa, qc, table) : call the Iqxy function for symmetric shapes // CALL_IQ_ABC(qa, qc, table) : call the Iqxy function for asymmetric shapes // CALL_IQ_XY(qx, qy, table) : call the Iqxy function for arbitrary models // INVALID(table) : test if the current point is feesible to calculate. This // will be defined in the kernel definition file. // PROJECTION : equirectangular=1, sinusoidal=2 // see explore/jitter.py for definitions. #ifndef _PAR_BLOCK_ // protected block so we can include this code twice. #define _PAR_BLOCK_ typedef struct { #if MAX_PD > 0 int32_t pd_par[MAX_PD]; // id of the nth dispersity variable int32_t pd_length[MAX_PD]; // length of the nth dispersity weight vector int32_t pd_offset[MAX_PD]; // offset of pd weights in the value & weight vector int32_t pd_stride[MAX_PD]; // stride to move to the next index at this level #endif // MAX_PD > 0 int32_t num_eval; // total number of voxels in hypercube int32_t num_weights; // total length of the weights vector int32_t num_active; // number of non-trivial pd loops int32_t theta_par; // id of first orientation variable } ProblemDetails; // Intel HD 4000 needs private arrays to be a multiple of 4 long typedef struct { PARAMETER_TABLE } ParameterTable; typedef union { ParameterTable table; double vector[4*((NUM_PARS+3)/4)]; } ParameterBlock; #endif // _PAR_BLOCK_ #if defined(MAGNETIC) && NUM_MAGNETIC > 0 // ===== Helper functions for magnetism ===== // Return value restricted between low and high static double clip(double value, double low, double high) { return (value < low ? low : (value > high ? 
high : value));
}

// Compute spin cross sections given in_spin and out_spin
// To convert spin cross sections to sld b:
//     uu * (sld - m_sigma_x);
//     dd * (sld + m_sigma_x);
//     ud * (m_sigma_y - 1j*m_sigma_z);
//     du * (m_sigma_y + 1j*m_sigma_z);
// weights for spin cross sections: dd, du real, ud real, uu, du imag, ud imag
static void set_spin_weights(double in_spin, double out_spin, double weight[6])
{
  double norm;
  in_spin = clip(in_spin, 0.0, 1.0);
  out_spin = clip(out_spin, 0.0, 1.0);
  // A previous version of this function took the square root of the weights,
  // under the assumption that
  //
  //     w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...)
  //
  // However, since the weights are applied to the final intensity and are
  // not embedded inside the I(q) function, we want the full weight and not
  // the square root.  In any case, no function will ever use set_spin_weights
  // as part of calculating an amplitude, since the weights relate to the
  // polarisation efficiency of the instrument.  The weights serve to
  // construct the various magnetic scattering cross sections, which are
  // linear combinations of the spin-resolved cross sections.  The
  // polarisation efficiencies e_in and e_out are parameters ranging from
  // 0.5 (unpolarised beam) to 1 (perfect optics).  For in_spin or
  // out_spin < 0.5 one assumes a cross section where the spin is
  // reversed/flipped with respect to the initial supermirror polariser;
  // the actual polarisation efficiency in that case is e_in/out = 1 - in/out_spin.
  if (out_spin < 0.5){norm=1-out_spin;}
  else{norm=out_spin;}

  // The norm is needed to make sure that the scattering cross sections are
  // correctly weighted, such that the sum of spin-resolved measurements adds
  // up to the unpolarised or half-polarised scattering cross section.  No
  // intensity weighting is needed on the incoming polariser side (assuming
  // that the user has normalised to the incoming flux with the polariser in
  // for SANSPol, or to the unpolarised beam, respectively).
  weight[0] = (1.0-in_spin) * (1.0-out_spin) / norm; // dd
  weight[1] = (1.0-in_spin) * out_spin / norm;       // du
  weight[2] = in_spin * (1.0-out_spin) / norm;       // ud
  weight[3] = in_spin * out_spin / norm;             // uu
  weight[4] = weight[1]; // du.imag
  weight[5] = weight[2]; // ud.imag
}

// Compute the magnetic sld
static double mag_sld(
  const unsigned int xs, // 0=dd, 1=du.real, 2=ud.real, 3=uu, 4=du.imag, 5=ud.imag
  const double qx, const double qy,
  const double px, const double py,
  const double sld,
  const double mx, const double my, const double mz
)
{
  if (xs < 4) {
    const double perp = qy*mx - qx*my;
    switch (xs) {
      default: // keep compiler happy; condition ensures xs in [0,1,2,3]
      case 0: // dd => sld - D M_perpx
          return sld - px*perp;
      case 1: // du.real => -D M_perpy
          return py*perp;
      case 2: // ud.real => -D M_perpy
          return py*perp;
      case 3: // uu => sld + D M_perpx
          return sld + px*perp;
    }
  } else {
    if (xs == 4) {
      return -mz;  // du.imag => +D M_perpz
    } else { // index == 5
      return +mz;  // ud.imag => -D M_perpz
    }
  }
}

#endif

// ===== Helper functions for orientation and jitter =====

// To change the definition of the angles, run explore/angles.py, which
// uses sympy to generate the equations.

#if !defined(_QAC_SECTION) && defined(CALL_IQ_AC)
#define _QAC_SECTION

typedef struct {
    double R31, R32;
} QACRotation;

// Fill in the rotation matrix R from the view angles (theta, phi) and the
// jitter angles (dtheta, dphi).
This matrix can be applied to all of the // (qx, qy) points in the image to produce R*[qx,qy]' = [qa,qc]' static void qac_rotation( QACRotation *rotation, double theta, double phi, double dtheta, double dphi) { double sin_theta, cos_theta; double sin_phi, cos_phi; // reverse view matrix SINCOS(theta*M_PI_180, sin_theta, cos_theta); SINCOS(phi*M_PI_180, sin_phi, cos_phi); const double V11 = cos_phi*cos_theta; const double V12 = sin_phi*cos_theta; const double V21 = -sin_phi; const double V22 = cos_phi; const double V31 = sin_theta*cos_phi; const double V32 = sin_phi*sin_theta; // reverse jitter matrix SINCOS(dtheta*M_PI_180, sin_theta, cos_theta); SINCOS(dphi*M_PI_180, sin_phi, cos_phi); const double J31 = sin_theta; const double J32 = -sin_phi*cos_theta; const double J33 = cos_phi*cos_theta; // reverse matrix rotation->R31 = J31*V11 + J32*V21 + J33*V31; rotation->R32 = J31*V12 + J32*V22 + J33*V32; } // Apply the rotation matrix returned from qac_rotation to the point (qx,qy), // returning R*[qx,qy]' = [qa,qc]' static void qac_apply( QACRotation *rotation, double qx, double qy, double *qab_out, double *qc_out) { // Indirect calculation of qab, from qab^2 = |q|^2 - qc^2 const double dqc = rotation->R31*qx + rotation->R32*qy; const double dqab_sq = -dqc*dqc + qx*qx + qy*qy; //*qab_out = sqrt(fabs(dqab_sq)); *qab_out = dqab_sq > 0.0 ? sqrt(dqab_sq) : 0.0; *qc_out = dqc; } #endif // _QAC_SECTION #if !defined(_QABC_SECTION) && defined(CALL_IQ_ABC) #define _QABC_SECTION typedef struct { double R11, R12; double R21, R22; double R31, R32; } QABCRotation; // Fill in the rotation matrix R from the view angles (theta, phi, psi) and the // jitter angles (dtheta, dphi, dpsi). This matrix can be applied to all of the // (qx, qy) points in the image to produce R*[qx,qy]' = [qa,qb,qc]' static void qabc_rotation( QABCRotation *rotation, double theta, double phi, double psi, double dtheta, double dphi, double dpsi) { double sin_theta, cos_theta; double sin_phi, cos_phi; double sin_psi, cos_psi; // reverse view matrix SINCOS(theta*M_PI_180, sin_theta, cos_theta); SINCOS(phi*M_PI_180, sin_phi, cos_phi); SINCOS(psi*M_PI_180, sin_psi, cos_psi); const double V11 = -sin_phi*sin_psi + cos_phi*cos_psi*cos_theta; const double V12 = sin_phi*cos_psi*cos_theta + sin_psi*cos_phi; const double V21 = -sin_phi*cos_psi - sin_psi*cos_phi*cos_theta; const double V22 = -sin_phi*sin_psi*cos_theta + cos_phi*cos_psi; const double V31 = sin_theta*cos_phi; const double V32 = sin_phi*sin_theta; // reverse jitter matrix SINCOS(dtheta*M_PI_180, sin_theta, cos_theta); SINCOS(dphi*M_PI_180, sin_phi, cos_phi); SINCOS(dpsi*M_PI_180, sin_psi, cos_psi); const double J11 = cos_psi*cos_theta; const double J12 = sin_phi*sin_theta*cos_psi + sin_psi*cos_phi; const double J13 = sin_phi*sin_psi - sin_theta*cos_phi*cos_psi; const double J21 = -sin_psi*cos_theta; const double J22 = -sin_phi*sin_psi*sin_theta + cos_phi*cos_psi; const double J23 = sin_phi*cos_psi + sin_psi*sin_theta*cos_phi; const double J31 = sin_theta; const double J32 = -sin_phi*cos_theta; const double J33 = cos_phi*cos_theta; // reverse matrix rotation->R11 = J11*V11 + J12*V21 + J13*V31; rotation->R12 = J11*V12 + J12*V22 + J13*V32; rotation->R21 = J21*V11 + J22*V21 + J23*V31; rotation->R22 = J21*V12 + J22*V22 + J23*V32; rotation->R31 = J31*V11 + J32*V21 + J33*V31; rotation->R32 = J31*V12 + J32*V22 + J33*V32; } // Apply the rotation matrix returned from qabc_rotation to the point (qx,qy), // returning R*[qx,qy]' = [qa,qb,qc]' static void qabc_apply( QABCRotation *rotation, double 
qx, double qy, double *qa_out, double *qb_out, double *qc_out) { *qa_out = rotation->R11*qx + rotation->R12*qy; *qb_out = rotation->R21*qx + rotation->R22*qy; *qc_out = rotation->R31*qx + rotation->R32*qy; } #endif // _QABC_SECTION // ==================== KERNEL CODE ======================== kernel void KERNEL_NAME( int32_t nq, // number of q values const int32_t pd_start, // where we are in the dispersity loop const int32_t pd_stop, // where we are stopping in the dispersity loop pglobal const ProblemDetails *details, pglobal const double *values, // parameter values and distributions pglobal const double *q, // nq q values, with padding to boundary pglobal double *result, // nq+1 return values, again with padding const double cutoff, // cutoff in the dispersity weight product int32_t radius_effective_mode // which effective radius to compute ) { #if defined(USE_GPU) // who we are and what element we are working with #if defined(USE_OPENCL) const int q_index = get_global_id(0); #else // USE_CUDA const int q_index = threadIdx.x + blockIdx.x * blockDim.x; #endif if (q_index >= nq) return; #else // Define q_index here so that debugging statements can be written to work // for both OpenCL and DLL using: // if (q_index == 0) {printf(...);} int q_index = 0; #endif // ** Fill in the local values table ** // Storage for the current parameter values. // These will be updated as we walk the dispersity mesh. ParameterBlock local_values; // values[0] is scale // values[1] is background #ifdef USE_OPENMP #pragma omp parallel for #endif for (int i=0; i < NUM_PARS; i++) { local_values.vector[i] = values[2+i]; //if (q_index==0) printf("p%d = %g\n",i, local_values.vector[i]); } //if (q_index==0) printf("NUM_VALUES:%d NUM_PARS:%d MAX_PD:%d\n", NUM_VALUES, NUM_PARS, MAX_PD); //if (q_index==0) printf("start:%d stop:%d\n", pd_start, pd_stop); // ** Precompute magnatism values ** #if defined(MAGNETIC) && NUM_MAGNETIC>0 // Location of the sld parameters in the parameter vector. // These parameters are updated with the effective sld due to magnetism. const int32_t slds[] = { MAGNETIC_PARS }; // Interpret polarization cross section. // up_frac_i = values[NUM_PARS+2]; // up_frac_f = values[NUM_PARS+3]; // up_angle = values[NUM_PARS+4]; // TODO: could precompute more magnetism parameters before calling the kernel. double xs_weights[8]; // uu, ud real, du real, dd, ud imag, du imag, fill, fill double cos_mspin, sin_mspin; set_spin_weights(values[NUM_PARS+2], values[NUM_PARS+3], xs_weights); SINCOS(-values[NUM_PARS+4]*M_PI_180, sin_mspin, cos_mspin); #endif // MAGNETIC // ** Fill in the initial results ** // If pd_start is zero that means that we are starting a new calculation, // and must initialize the result to zero. Otherwise, we are restarting // the calculation from somewhere in the middle of the dispersity mesh, // and we update the value rather than reset it. Similarly for the // normalization factor, which is stored as the final value in the // results vector (one past the number of q values). // // The code differs slightly between opencl and dll since opencl is only // seeing one q value (stored in the variable "this_F2") while the dll // version must loop over all q. #if defined(CALL_FQ) double weight_norm = (pd_start == 0 ? 0.0 : result[2*nq]); double weighted_form = (pd_start == 0 ? 0.0 : result[2*nq+1]); double weighted_shell = (pd_start == 0 ? 0.0 : result[2*nq+2]); double weighted_radius = (pd_start == 0 ? 0.0 : result[2*nq+3]); #else double weight_norm = (pd_start == 0 ? 
0.0 : result[nq]); double weighted_form = (pd_start == 0 ? 0.0 : result[nq+1]); double weighted_shell = (pd_start == 0 ? 0.0 : result[nq+2]); double weighted_radius = (pd_start == 0 ? 0.0 : result[nq+3]); #endif #if defined(USE_GPU) #if defined(CALL_FQ) double this_F2 = (pd_start == 0 ? 0.0 : result[2*q_index+0]); double this_F1 = (pd_start == 0 ? 0.0 : result[2*q_index+1]); #else double this_F2 = (pd_start == 0 ? 0.0 : result[q_index]); #endif #else // !USE_GPU if (pd_start == 0) { #ifdef USE_OPENMP #pragma omp parallel for #endif #if defined(CALL_FQ) // 2*nq for F^2,F pairs for (int q_index=0; q_index < 2*nq; q_index++) result[q_index] = 0.0; #else for (int q_index=0; q_index < nq; q_index++) result[q_index] = 0.0; #endif } //if (q_index==0) printf("start %d %g %g\n", pd_start, pd_norm, result[0]); #endif // !USE_GPU // ====== macros to set up the parts of the loop ======= /* Based on the level of the loop, uses C preprocessor magic to construct level-specific looping variables, including these from loop level 3: int n3 : length of loop for mesh level 3 int i3 : current position in the loop for level 3, which is calculated from a combination of pd_start, pd_stride[3] and pd_length[3]. int p3 : is the index into the parameter table for mesh level 3 double v3[] : pointer into dispersity array to values for loop 3 double w3[] : pointer into dispersity array to weights for loop 3 double weight3 : the product of weights from levels 3 and up, computed as weight5*weight4*w3[i3]. Note that we need an outermost value weight5 set to 1.0 for this to work properly. After expansion, the loop struction will look like the following: // --- PD_INIT(4) --- const int n4 = pd_length[4]; const int p4 = pd_par[4]; pglobal const double *v4 = pd_value + pd_offset[4]; pglobal const double *w4 = pd_weight + pd_offset[4]; int i4 = (pd_start/pd_stride[4])%n4; // position in level 4 at pd_start // --- PD_INIT(3) --- const int n3 = pd_length[3]; ... int i3 = (pd_start/pd_stride[3])%n3; // position in level 3 at pd_start PD_INIT(2) PD_INIT(1) PD_INIT(0) // --- PD_OUTERMOST_WEIGHT(5) --- const double weight5 = 1.0; // --- PD_OPEN(4,5) --- while (i4 < n4) { parameter[p4] = v4[i4]; // set the value for pd parameter 4 at this mesh point const double weight4 = w4[i4] * weight5; // from PD_OPEN(3,4) while (i3 < n3) { parameter[p3] = v3[i3]; // set the value for pd parameter 3 at this mesh point const double weight3 = w3[i3] * weight4; PD_OPEN(3,2) PD_OPEN(2,1) PD_OPEN(0,1) // ... main loop body ... APPLY_PROJECTION // convert jitter values to spherical coords BUILD_ROTATION // construct the rotation matrix qxy => qabc for each q FETCH_Q // set qx,qy from the q input vector APPLY_ROTATION // convert qx,qy to qa,qb,qc CALL_KERNEL // F2 = Iqxy(qa, qb, qc, p1, p2, ...) ++step; // increment counter representing position in dispersity mesh PD_CLOSE(0) PD_CLOSE(1) PD_CLOSE(2) // --- PD_CLOSE(3) --- if (step >= pd_stop) break; ++i3; } i3 = 0; // reset loop counter for next round through the loop // --- PD_CLOSE(4) --- if (step >= pd_stop) break; ++i4; } i4 = 0; // reset loop counter even though no more rounds through the loop */ // ** prepare inner loops ** // Depending on the shape type (radial, axial, triaxial), the variables // and calling parameters in the loop body will be slightly different. // Macros capture the differences in one spot so the rest of the code // is easier to read. The code below both declares variables for the // inner loop and defines the macros that use them. 
#if defined(CALL_FQ) // COMPUTE_F1_F2 is true // unoriented 1D returning <F> and <F^2> // Note that F1 and F2 are returned from CALL_FQ by reference, and the // user of the CALL_KERNEL macro below is assuming that F1 and F2 are defined. double qk; double F1, F2; #define FETCH_Q() do { qk = q[q_index]; } while (0) #define BUILD_ROTATION() do {} while(0) #define APPLY_ROTATION() do {} while(0) #define CALL_KERNEL() CALL_FQ(qk,F1,F2,local_values.table) #elif defined(CALL_FQ_A) // unoriented 2D return <F> and <F^2> // Note that the CALL_FQ_A macro is computing _F1_slot and _F2_slot by // reference then returning _F2_slot. We are calling them _F1_slot and // _F2_slot here so they don't conflict with _F1 and _F2 in the macro // expansion, or with the use of F2 = CALL_KERNEL() when it is used below. double qx, qy; double _F1_slot, _F2_slot; #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0) #define BUILD_ROTATION() do {} while(0) #define APPLY_ROTATION() do {} while(0) #define CALL_KERNEL() CALL_FQ_A(sqrt(qx*qx+qy*qy),_F1_slot,_F2_slot,local_values.table) #elif defined(CALL_IQ) // unoriented 1D return <F^2> double qk; #define FETCH_Q() do { qk = q[q_index]; } while (0) #define BUILD_ROTATION() do {} while(0) #define APPLY_ROTATION() do {} while(0) #define CALL_KERNEL() CALL_IQ(qk,local_values.table) #elif defined(CALL_IQ_A) // unoriented 2D double qx, qy; #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0) #define BUILD_ROTATION() do {} while(0) #define APPLY_ROTATION() do {} while(0) #define CALL_KERNEL() CALL_IQ_A(sqrt(qx*qx+qy*qy), local_values.table) #elif defined(CALL_IQ_AC) // oriented symmetric 2D double qx, qy; #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0) double qa, qc; QACRotation rotation; // theta, phi, dtheta, dphi are defined below in projection to avoid repeated code. #define BUILD_ROTATION() qac_rotation(&rotation, theta, phi, dtheta, dphi); #define APPLY_ROTATION() qac_apply(&rotation, qx, qy, &qa, &qc) #define CALL_KERNEL() CALL_IQ_AC(qa, qc, local_values.table) #elif defined(CALL_IQ_ABC) // oriented asymmetric 2D double qx, qy; #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0) double qa, qb, qc; QABCRotation rotation; // theta, phi, dtheta, dphi are defined below in projection to avoid repeated code. // psi and dpsi are only for IQ_ABC, so they are processed here. const double psi = values[details->theta_par+4]; local_values.table.psi = 0.; #define BUILD_ROTATION() qabc_rotation(&rotation, theta, phi, psi, dtheta, dphi, local_values.table.psi) #define APPLY_ROTATION() qabc_apply(&rotation, qx, qy, &qa, &qb, &qc) #define CALL_KERNEL() CALL_IQ_ABC(qa, qb, qc, local_values.table) #elif defined(CALL_IQ_XY) // direct call to qx,qy calculator double qx, qy; #define FETCH_Q() do { qx = q[2*q_index]; qy = q[2*q_index+1]; } while (0) #define BUILD_ROTATION() do {} while(0) #define APPLY_ROTATION() do {} while(0) #define CALL_KERNEL() CALL_IQ_XY(qx, qy, local_values.table) #endif // Define APPLY_PROJECTION depending on model symmetries. We do this outside // the previous if block so that we don't need to repeat the identical // logic in the IQ_AC and IQ_ABC branches. This will become more important // if we implement more projections, or more complicated projections. 
#if defined(CALL_IQ) || defined(CALL_IQ_A) || defined(CALL_FQ) || defined(CALL_FQ_A) // no orientation #define APPLY_PROJECTION() const double weight=weight0 #elif defined(CALL_IQ_XY) // pass orientation to the model // CRUFT: support oriented model which define Iqxy rather than Iqac or Iqabc // Need to plug the values for the orientation angles back into parameter // table in case they were overridden by the orientation offset. This // means that orientation dispersity will not work for these models, but // it was broken anyway, so no matter. Still want to provide Iqxy in case // the user model wants full control of orientation/magnetism. #if defined(HAVE_PSI) const double theta = values[details->theta_par+2]; const double phi = values[details->theta_par+3]; const double psi = values[details->theta_par+4]; double weight; #define APPLY_PROJECTION() do { \ local_values.table.theta = theta; \ local_values.table.phi = phi; \ local_values.table.psi = psi; \ weight=weight0; \ } while (0) #elif defined(HAVE_THETA) const double theta = values[details->theta_par+2]; const double phi = values[details->theta_par+3]; double weight; #define APPLY_PROJECTION() do { \ local_values.table.theta = theta; \ local_values.table.phi = phi; \ weight=weight0; \ } while (0) #else #define APPLY_PROJECTION() const double weight=weight0 #endif #else // apply jitter and view before calling the model // Grab the "view" angles (theta, phi, psi) from the initial parameter table. const double theta = values[details->theta_par+2]; const double phi = values[details->theta_par+3]; // Make sure jitter angle defaults to zero if there is no jitter distribution local_values.table.theta = 0.; local_values.table.phi = 0.; // The "jitter" angles (dtheta, dphi, dpsi) are stored with the // dispersity values and copied to the local parameter table as // we go through the mesh. double dtheta, dphi, weight; #if PROJECTION == 1 // equirectangular #define APPLY_PROJECTION() do { \ dtheta = local_values.table.theta; \ dphi = local_values.table.phi; \ weight = fabs(cos(dtheta*M_PI_180)) * weight0; \ } while (0) #elif PROJECTION == 2 // sinusoidal #define APPLY_PROJECTION() do { \ dtheta = local_values.table.theta; \ dphi = local_values.table.phi; \ weight = weight0; \ if (dtheta != 90.0) dphi /= cos(dtheta*M_PI_180); \ else if (dphi != 0.0) weight = 0.; \ if (fabs(dphi) >= 180.) weight = 0.; \ } while (0) #endif #endif // done defining APPLY_PROJECTION // ** define looping macros ** // Define looping variables #define PD_INIT(_LOOP) \ const int n##_LOOP = details->pd_length[_LOOP]; \ const int p##_LOOP = details->pd_par[_LOOP]; \ pglobal const double *v##_LOOP = pd_value + details->pd_offset[_LOOP]; \ pglobal const double *w##_LOOP = pd_weight + details->pd_offset[_LOOP]; \ int i##_LOOP = (pd_start/details->pd_stride[_LOOP])%n##_LOOP; // Jump into the middle of the dispersity loop #define PD_OPEN(_LOOP,_OUTER) \ while (i##_LOOP < n##_LOOP) { \ local_values.vector[p##_LOOP] = v##_LOOP[i##_LOOP]; \ const double weight##_LOOP = w##_LOOP[i##_LOOP] * weight##_OUTER; // create the variable "weight#=1.0" where # is the outermost level+1 (=MAX_PD). #define _PD_OUTERMOST_WEIGHT(_n) const double weight##_n = 1.0; #define PD_OUTERMOST_WEIGHT(_n) _PD_OUTERMOST_WEIGHT(_n) // Close out the loop #define PD_CLOSE(_LOOP) \ if (step >= pd_stop) break; \ ++i##_LOOP; \ } \ i##_LOOP = 0; // ====== construct the loops ======= // Pointers to the start of the dispersity and weight vectors, if needed. 
#if MAX_PD>0 pglobal const double *pd_value = values + NUM_VALUES; pglobal const double *pd_weight = pd_value + details->num_weights; #endif // The variable "step" is the current position in the dispersity loop. // It will be incremented each time a new point in the mesh is accumulated, // and used to test whether we have reached pd_stop. int step = pd_start; // *** define loops for each of 0, 1, 2, ..., modelinfo.MAX_PD-1 *** // define looping variables #if MAX_PD>4 PD_INIT(4) #endif #if MAX_PD>3 PD_INIT(3) #endif #if MAX_PD>2 PD_INIT(2) #endif #if MAX_PD>1 PD_INIT(1) #endif #if MAX_PD>0 PD_INIT(0) #endif // open nested loops PD_OUTERMOST_WEIGHT(MAX_PD) #if MAX_PD>4 PD_OPEN(4,5) #endif #if MAX_PD>3 PD_OPEN(3,4) #endif #if MAX_PD>2 PD_OPEN(2,3) #endif #if MAX_PD>1 PD_OPEN(1,2) #endif #if MAX_PD>0 PD_OPEN(0,1) #endif //if (q_index==0) {printf("step:%d of %d, pars:",step,pd_stop); for (int i=0; i < NUM_PARS; i++) printf("p%d=%g ",i, local_values.vector[i]); printf("\n");} // ====== loop body ======= #ifdef INVALID if (!INVALID(local_values.table)) #endif { APPLY_PROJECTION(); // Accumulate I(q) // Note: weight==0 must always be excluded if (weight > cutoff) { double form, shell; CALL_VOLUME(form, shell, local_values.table); weight_norm += weight; weighted_form += weight * form; weighted_shell += weight * shell; if (radius_effective_mode != 0) { weighted_radius += weight * CALL_RADIUS_EFFECTIVE(radius_effective_mode, local_values.table); } BUILD_ROTATION(); #if !defined(USE_GPU) // DLL needs to explicitly loop over the q values. #ifdef USE_OPENMP #pragma omp parallel for #endif for (q_index=0; q_index<nq; q_index++) #endif // !USE_GPU { FETCH_Q(); APPLY_ROTATION(); // ======= COMPUTE SCATTERING ========== #if defined(MAGNETIC) && NUM_MAGNETIC > 0 // Compute the scattering from the magnetic cross sections. double F2 = 0.0; const double qsq = qx*qx + qy*qy; if (qsq > 1.e-16) { // TODO: what is the magnetic scattering at q=0 const double px = (qy*cos_mspin + qx*sin_mspin)/qsq; const double py = (qy*sin_mspin - qx*cos_mspin)/qsq; // loop over uu, ud real, du real, dd, ud imag, du imag for (unsigned int xs=0; xs<6; xs++) { const double xs_weight = xs_weights[xs]; if (xs_weight > 1.e-8) { // Since the cross section weight is significant, set the slds // to the effective slds for this cross section, call the // kernel, and add according to weight. 
for (int sk=0; sk<NUM_MAGNETIC; sk++) { const int32_t mag_index = NUM_PARS+5 + 3*sk; const int32_t sld_index = slds[sk]; const double mx = values[mag_index]; const double my = values[mag_index+1]; const double mz = values[mag_index+2]; local_values.vector[sld_index] = mag_sld(xs, qx, qy, px, py, values[sld_index+2], mx, my, mz); //if (q_index==0) printf("%d: (qx,qy)=(%g,%g) xs=%d sld%d=%g p=(%g,%g) m=(%g,%g,%g)\n", // q_index, qx, qy, xs, sk, local_values.vector[sld_index], px, py, mx, my, mz); } F2 += xs_weight * CALL_KERNEL(); } } } #else // !MAGNETIC #if defined(CALL_FQ) CALL_KERNEL(); // sets F1 and F2 by reference #else const double F2 = CALL_KERNEL(); #endif #endif // !MAGNETIC //printf("q_index:%d %g %g %g %g\n", q_index, F2, weight0); #if defined(USE_GPU) #if defined(CALL_FQ) this_F2 += weight * F2; this_F1 += weight * F1; #else this_F2 += weight * F2; #endif #else // !USE_OPENCL #if defined(CALL_FQ) result[2*q_index+0] += weight * F2; result[2*q_index+1] += weight * F1; #else result[q_index] += weight * F2; #endif #endif // !USE_OPENCL } } } // close nested loops ++step; #if MAX_PD>0 PD_CLOSE(0) #endif #if MAX_PD>1 PD_CLOSE(1) #endif #if MAX_PD>2 PD_CLOSE(2) #endif #if MAX_PD>3 PD_CLOSE(3) #endif #if MAX_PD>4 PD_CLOSE(4) #endif // Remember the results and the updated norm. #if defined(USE_GPU) #if defined(CALL_FQ) result[2*q_index+0] = this_F2; result[2*q_index+1] = this_F1; #else result[q_index] = this_F2; #endif if (q_index == 0) #endif { #if defined(CALL_FQ) result[2*nq] = weight_norm; result[2*nq+1] = weighted_form; result[2*nq+2] = weighted_shell; result[2*nq+3] = weighted_radius; #else result[nq] = weight_norm; result[nq+1] = weighted_form; result[nq+2] = weighted_shell; result[nq+3] = weighted_radius; #endif } // ** clear the macros in preparation for the next kernel ** #undef PD_INIT #undef PD_OPEN #undef PD_CLOSE #undef FETCH_Q #undef APPLY_PROJECTION #undef BUILD_ROTATION #undef APPLY_ROTATION #undef CALL_KERNEL }
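The PD_INIT/PD_OPEN/PD_CLOSE machinery above treats the dispersity mesh as a mixed-radix counter, so a kernel invocation can resume at any pd_start via i = (pd_start/stride) % length. A small stand-alone sketch of that indexing (loop sizes and names chosen purely for illustration):

#include <stdio.h>

#define NLOOPS 3

int main(void) {
  int length[NLOOPS] = {2, 3, 4};   /* points in each dispersity loop */
  int stride[NLOOPS];               /* product of the lengths below this level */
  stride[0] = 1;
  for (int k = 1; k < NLOOPS; k++)
    stride[k] = stride[k-1] * length[k-1];
  int num_eval = stride[NLOOPS-1] * length[NLOOPS-1];
  printf("%d mesh points total\n", num_eval);

  /* Resume partway through the mesh, exactly as PD_INIT does with
   * i_LOOP = (pd_start/pd_stride[_LOOP]) % n_LOOP. */
  int pd_start = 7, pd_stop = 11;
  for (int s = pd_start; s < pd_stop; s++) {
    printf("step %2d -> (", s);
    for (int k = NLOOPS - 1; k >= 0; k--)
      printf("%d%s", (s / stride[k]) % length[k], k ? "," : ")\n");
  }
  return 0;
}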
kernel_cpu_2.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } // #ifdef __cplusplus // extern "C" { // #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 #include <omp.h> // (in directory known to compiler) #include <stdlib.h> // (in directory known to compiler) #include <assert.h> #include <stdio.h> //======================================================================================================================================================150 // COMMON //======================================================================================================================================================150 #include "common.h" // (in directory provided here) //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "timer.h" // (in directory provided here) needed by timer //======================================================================================================================================================150 // HEADER //======================================================================================================================================================150 #include "./kernel_cpu_2.h" // (in directory provided here) //========================================================================================================================================================================================================200 // PLASMAKERNEL_GPU //========================================================================================================================================================================================================200 void kernel_cpu_2( int cores_arg, knode *knodes, long knodes_elem, int order, long maxheight, int count, long *currKnode, long *offset, long *lastKnode, long *offset_2, int *start, int *end, int *recstart, int *reclength) { //======================================================================================================================================================150 // Variables 
//======================================================================================================================================================150

	// timer
	long long time0;
	long long time1;
	long long time2;

	// common variables
	int i;

	time0 = get_time();

	//======================================================================================================================================================150
	//	MCPU SETUP
	//======================================================================================================================================================150

	int threadsPerBlock;
	threadsPerBlock = order < 1024 ? order : 1024;

	{
		time1 = get_time();

		//======================================================================================================================================================150
		//	PROCESS INTERACTIONS
		//======================================================================================================================================================150

		// private thread IDs
		int thid;
		int bid;

		// process the given number of queries
		{
			const unsigned long long parallel_for_start = current_time_ns();
#pragma omp parallel for private (i, thid)
			for(bid = 0; bid < count; bid++){

				// process levels of the tree
				for(i = 0; i < maxheight; i++){

					// process all leaves at each level
					for(thid = 0; thid < threadsPerBlock; thid++){

						if((knodes[currKnode[bid]].keys[thid] <= start[bid]) && (knodes[currKnode[bid]].keys[thid+1] > start[bid])){
							// this conditional is inserted to avoid a crash caused by a bug in the original code:
							// "offset[bid]" computed below is later used to index into knodes and can go out of
							// bounds, causing a segmentation fault; specifically, some values stored into
							// knodes->indices in the main function are out of range for the knodes array they address
							if(knodes[currKnode[bid]].indices[thid] < knodes_elem){
								offset[bid] = knodes[currKnode[bid]].indices[thid];
							}
						}
						if((knodes[lastKnode[bid]].keys[thid] <= end[bid]) && (knodes[lastKnode[bid]].keys[thid+1] > end[bid])){
							// same guard as above: "offset_2[bid]" computed below can otherwise address
							// knodes out of bounds and cause a segmentation fault
							if(knodes[lastKnode[bid]].indices[thid] < knodes_elem){
								offset_2[bid] = knodes[lastKnode[bid]].indices[thid];
							}
						}

					}

					// set for next tree level
					currKnode[bid] = offset[bid];
					lastKnode[bid] = offset_2[bid];

				}

				// process leaves: find the index of the starting record
				for(thid = 0; thid < threadsPerBlock; thid++){
					if(knodes[currKnode[bid]].keys[thid] == start[bid]){
						recstart[bid] = knodes[currKnode[bid]].indices[thid];
					}
				}

				// process leaves: find the index of the ending record
				for(thid = 0; thid < threadsPerBlock; thid++){
					if(knodes[lastKnode[bid]].keys[thid] == end[bid]){
						reclength[bid] = knodes[lastKnode[bid]].indices[thid] - recstart[bid] + 1;
					}
				}

			}
			const unsigned long long parallel_for_end = current_time_ns();
			printf("pragma93_omp_parallel %llu ns\n", parallel_for_end - parallel_for_start);
		}

		time2 = get_time();
	}

	//======================================================================================================================================================150
	//	DISPLAY TIMING
	//======================================================================================================================================================150

	printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
	printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time2-time0) * 100);
	printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time2-time0) * 100);
	printf("Total time:\n");
	printf("%.12f s\n", (float) (time2-time0) / 1000000);

} // main

//========================================================================================================================================================================================================200
//	END
//========================================================================================================================================================================================================200

// #ifdef __cplusplus
// }
// #endif
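Per query, the kernel above walks the B+-tree one level at a time: it finds the key pair that brackets the search key, follows the corresponding index, and bounds-checks that index as the comments describe. A hedged sketch of that descent with toy types (hypothetical names, not the Rodinia knode layout):

typedef struct toy_node {
  int nkeys;        /* number of keys stored in this node */
  int keys[8];      /* keys[i] .. keys[i+1] brackets child i */
  int indices[8];   /* child positions in the node pool */
} toy_node;

/* Descend `height` levels from `root` toward the leaf that brackets `key`.
 * pool_elem plays the role of knodes_elem: an index is only followed if
 * it stays inside the node pool. */
static int toy_descend(const toy_node* pool, int pool_elem,
                       int root, int height, int key) {
  int cur = root;
  for (int level = 0; level < height; level++) {
    for (int i = 0; i + 1 < pool[cur].nkeys; i++) {
      if (pool[cur].keys[i] <= key && pool[cur].keys[i + 1] > key) {
        if (pool[cur].indices[i] < pool_elem)  /* bounds guard, as above */
          cur = pool[cur].indices[i];          /* same role as offset[bid] */
        break;
      }
    }
  }
  return cur;
}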
planePrimitive.h
#ifndef __RAPTER_PLANEPRIMITIVE_H__ #define __RAPTER_PLANEPRIMITIVE_H__ #include <Eigen/Dense> #include "rapter/optimization/energyFunctors.h" #include "rapter/primitives/primitive.h" #include "rapter/processing/util.hpp" // pca, getPopulationOf() #ifdef RAPTER_USE_PCL # include "pcl/ModelCoefficients.h" # include "pcl/sample_consensus/sac_model_plane.h" # include "pcl/common/common.h" # include "pcl/visualization/pcl_visualizer.h" # include "pcl/surface/concave_hull.h" #endif //#include "omp.h" namespace rapter { //! \brief Class to wrap a plane with. Implements #pos() and #dir() functions, and a constructor taking a position and a direction. //! //! Stores 3D normal at the first three coeffs, and distance from origin at the fourth coordinate. //template <typename _Scalar> class PlanePrimitive : public ::rapter::Primitive<3,6> { typedef ::rapter::Primitive<3,6> ParentT; //using ParentT::_coeffs; //using ParentT::Scalar; //using ParentT::Dim; public: //enum { Dim = ParentT::Dim }; typedef ParentT::Scalar Scalar; typedef std::vector<Eigen::Matrix<Scalar,3,1> > ExtentsT; // ____________________CONSTRUCT____________________ inline PlanePrimitive() : ParentT() {} //! \brief Constructor that takes raw data in Eigen format as input. inline PlanePrimitive( Eigen::Matrix<Scalar,Dim,1> coeffs ) : ParentT( coeffs ) {} //! \brief Constructor that takes raw data in std::vector format as input. inline PlanePrimitive( std::vector<Scalar> const& coeffs ) : ParentT( coeffs ) {} /*! \brief Creates PlanePrimitive from point on plane and direction. * \param[in] p0 Point on plane. * \param[in] dir Plane normal. */ PlanePrimitive( Eigen::Matrix<Scalar,3,1> const& pnt, Eigen::Matrix<Scalar,3,1> const& normal ); /*! \brief Creates plane primitive from an ordered local frame */ inline PlanePrimitive( Eigen::Matrix<Scalar,3,1> const& centroid, Eigen::Matrix<Scalar,3,1> const& eigen_values, Eigen::Matrix<Scalar, 3, 3> const& eigen_vectors ); /*! \brief Called from \ref rapter::CandidateGenerator::generate to create a new candidate from this and \p other. * Create back-rotated version of the other at this position. * \param[out] out Created output primitive. * \param[in] other Primitive, who's direction we want to use to generate something at our location. * \param[in] closest_angle_id The id of closest perfect angle between us and \p other. This we want to rotate back by. * \param[in] angles List of desired angles, one of its entries is referenced by closest_angle_id. * \return \p out is only valid, if return true. We might decide, that there's nothing to create, in which case we return false. */ template <class _AngleContainerT> bool generateFrom( PlanePrimitive & out , PlanePrimitive const& other , int const closest_angle_id , _AngleContainerT const& angles , Scalar const angle_multiplier = Scalar(1.) ) const; template <typename DerivedT> static int generateFrom( PlanePrimitive & out , DerivedT const& normal , Scalar const distanceFromOrigin ); // ____________________VIRTUALS____________________ /*! \brief Compulsory virtual overload of position getter. The position of the plane is calculated on the fly from the formula N . x0 + d = 0. * \return The position of the plane as a 3D Eigen::Vector. */ //virtual Eigen::Matrix<Scalar,3,1> pos() const { return _coeffs.template head<3>(); } inline typename Eigen::Matrix<Scalar,Dim,1>::ConstFixedSegmentReturnType<3>::Type pos() const { return _coeffs.template head<3>(); } /*! \brief Compulsory virtual overload of orientation getter. 
The orientation of the plane is the normal stored at the first three coordinates of #_coeffs. * \return The normal of the plane as a 3D Eigen::Vector. */ //virtual Eigen::Matrix<Scalar,3,1> dir() const { return _coeffs.template segment<3>(3); } inline typename Eigen::Matrix<Scalar,Dim,1>::ConstFixedSegmentReturnType<3>::Type dir() const { return _coeffs.template segment<3>(3); } /*! \brief Returns the normal, that is stored at the first three coordinates of the internal storage. * \return The plane normal as a 3D Eigen::Vector Map. */ inline typename Eigen::Matrix<Scalar,Dim,1>::ConstFixedSegmentReturnType<3>::Type normal() const { return _coeffs.segment<3>(3); } // _______________________IO_______________________ /*! \brief Used in \ref io::readPrimitives to determine how many floats to parse from one entry. */ static inline int getFileEntryLength() { return 6; } /*! \brief Output <x0,n>, location and normal for the plane to a string that does *not* have an endline at the end. */ std::string toFileEntry() const; /*! \brief Used in \ref io::readPrimitives to create the primitive from the read floats. * \param[in] entries Contains <x0,n> to create the plane from. */ static PlanePrimitive fromFileEntry( std::vector<Scalar> const& entries ); // ____________________GEOMETRY____________________ /*! \brief Returns point to plane distance. * \param[in] point Point to calculate distance from. * \return Distance from point to plane. */ template <typename _DerivedT> inline Scalar getDistance( _DerivedT const& point ) const { return (point - this->pos()).dot( this->dir() ); } /*! \brief Helps calculating plane to plane distance. * \param[in] extrema Extrema of this primitive. * \param[in] pnt One of the extrema of the other primitive. * \return Distance from point to plane. */ Scalar getFiniteDistance( ExtentsT const& extrema, Position const& pnt ) const; int to4Coeffs( std::vector<Scalar> &coeffs ) const; Eigen::Matrix<Scalar,3,1> projectPoint( Eigen::Matrix<Scalar,3,1> const& point ) const; /*! \brief Calculates the length of the plane based on the points in \p cloud, masked by \p indices and the distance from point to plane \p threshold. * * The method calculates the inliers and selects the most far away point from #pos() in both directions. * \tparam _PointT Point wrapper stored in _PointContainerT. * \tparam _PointContainerT Type to store the points to select inliers from. Concept: std::vector<\ref rapter::PointPrimitive>. Depr: pcl::PointCloud< _PointT >::Ptr. * \tparam _IndicesContainerT Concept: std::vector<int>. * \param[out] minMax Output container holding the four endpoints. * \param[in] cloud Point container to look for inlier points in. * \param[in] threshold A point in \p cloud is an inlier, if it is closer than this threshold. Usually the "scale". * \param[in] indices Optional input to specify subset of points by indices. * \return EXIT_SUCCESS */ template <typename _PointPrimitiveT, class _IndicesContainerT, typename _PointContainerT> int getExtent( ExtentsT & minMax , _PointContainerT const& cloud , double const threshold = 0.01 , _IndicesContainerT const* indices_arg = NULL , bool const force_axis_aligned = false ) const; /*! \brief Calculates size, a bit smarter, than taking the area of #getExtent(). * \tparam MatrixDerived Concept: Eigen::Matrix<_Scalar,-1,1>. 
*/ template <class _IndicesContainerT, typename MatrixDerived, typename _Scalar, class _PointContainerT > MatrixDerived& getSpatialSignificance( MatrixDerived & in , _PointContainerT const& points , _Scalar const /*scale*/ , _IndicesContainerT * indices = NULL , bool const return_squared = false ) const; // ____________________DRAWING____________________ #ifdef RAPTER_USE_PCL inline pcl::ModelCoefficients::Ptr modelCoefficients() const; /*! \brief Draws the Plane from extrema * \tparam PointsT Concept: std::vector<pcl::PointXYZ>. */ template <class PointsT> static inline int draw( PointsT const& corners , pcl::visualization::PCLVisualizer::Ptr v , std::string const plane_name , double const r , double const g , double const b , int const viewport_id = 0 ); /*! \brief Extract convec hull and display * \tparam PclCloudT Concept: pcl::PointCloud<pcl::PointXYZ>::Ptr plane_polygon_cloud_ptr( new pcl::PointCloud<pcl::PointXYZ> ). * \tparam _PointContainerT Concept: std::vector<pcl::PointXYZ>. * \tparam _IndicesContainerT Concept: std::vector<int>. */ template < class PclCloudT, class _PointContainerT, class _IndicesContainerT> static inline int getHull( PclCloudT & plane_polygon_cloud , PlanePrimitive const& plane , _PointContainerT const& points , _IndicesContainerT const* indices , float const alpha = 2.f , pcl::PolygonMesh * out_mesh = NULL ); /*! \brief Extract convec hull and display * \tparam PointsT Concept: std::vector<pcl::PointXYZ>. */ template <class _PointContainerT, class _IndicesContainerT> static inline int drawConvex( pcl::visualization::PCLVisualizer::Ptr v , PlanePrimitive const& plane , _PointContainerT const& cloud , _IndicesContainerT const* indices , std::string const plane_name , double const r , double const g , double const b , int const viewport_id = 0 , float const alpha = 2.f ); /*! \brief Draws plane. * \tparam _PointPrimitiveT Concept: \ref rapter::PointPrimitive. * \tparam _PointContainerT Concept: std::vector< _PointPrimitiveT >. * \tparam _IndicesContainerT Concept: std::vector<int>. */ template <class _PointPrimitiveT, class _PointContainerT, class _IndicesContainerT> static int draw( PlanePrimitive const& plane , _PointContainerT const& cloud , Scalar const radius , _IndicesContainerT const* indices , pcl::visualization::PCLVisualizer::Ptr v , std::string const& plane_name , double const r , double const g , double const b , int const viewport_id = 0 , Scalar const stretch = Scalar( 1. ) , int const draw_mode = 0 , Scalar const alpha = 2. ); #endif //...RAPTER_USE_PCL bool gidUnset() const; }; //...class PlanePrimitive } //...ns rapter // ________________________________________________________HPP_________________________________________________________ #if 0 #ifndef __RAPTER_INC_PLANEPRIMITIVE_HPP__ #define __RAPTER_INC_PLANEPRIMITIVE_HPP__ namespace rapter { PlanePrimitive::PlanePrimitive( Eigen::Matrix<Scalar, 3, 1> pnt, Eigen::Matrix<Scalar, 3, 1> normal ) { _coeffs.template head<3>() = pnt; _coeffs.template segment<3>(3) = normal.normalized(); //_coeffs.template segment<3>(0) = normal.normalized(); //_coeffs (3) = Scalar(-1) * _coeffs.template head<3>().dot( pnt.template head<3>() ); // distance } /*! 
\brief Creates plane primitive from an ordered local frame */ PlanePrimitive::PlanePrimitive( Eigen::Matrix<Scalar,3,1> const& centroid, Eigen::Matrix<Scalar,3,1> const& eigen_values, Eigen::Matrix<Scalar, 3, 3> const& eigen_vectors ) { // get eigen vector for biggest eigen value const int min_eig_val_id = std::distance( eigen_values.data(), std::min_element( eigen_values.data(), eigen_values.data()+3 ) ); // set position _coeffs.template head<3>() = centroid; // set direction _coeffs.template segment<3>(3) = eigen_vectors.col(min_eig_val_id).normalized(); } /*! \brief Called from \ref rapter::CandidateGenerator::generate to create a new candidate from this and \p other. * Create back-rotated version of the other at this position. * \param[out] out Created output primitive. * \param[in] other Primitive, who's direction we want to use to generate something at our location. * \param[in] closest_angle_id The id of closest perfect angle between us and \p other. This we want to rotate back by. * \param[in] angles List of desired angles, one of its entries is referenced by closest_angle_id. * \return \p out is only valid, if return true. We might decide, that there's nothing to create, in which case we return false. */ template <class _AngleContainerT> bool PlanePrimitive::generateFrom( PlanePrimitive & out , PlanePrimitive const& other , int const closest_angle_id , _AngleContainerT const& angles , Scalar const angle_multiplier /* = Scalar(1.) */ ) const { // if not 0 or M_PI, meaning not parallel if ( (closest_angle_id != 0) && (closest_angle_id != angles.size()-1) ) { //std::cerr << "[" << __func__ << "]: " << "rotating plane by angle " << angles[closest_angle_id] << std::endl; //return false; //Scalar const angle = angles[ closest_angle_id ]; Scalar angle = angles[ closest_angle_id ]; #warning This has to be tested for angle sign out = PlanePrimitive( /* position: */ this->pos() , /* direction: */ Eigen::AngleAxisf( angle, other.dir().cross(dir()) ) * other.dir() ); } else { //Scalar const angle = angles[ closest_angle_id ]; out = PlanePrimitive( /* position: */ this->pos() , /* direction: */ other.dir() ); } out.copyTagsFrom( *this ); // copy position id from self //out.setTag( GID, this->getTag(GID) ); // copy direction id from the other out.setTag( TAGS::DIR_GID, other.getTag(TAGS::DIR_GID) ); // erase chosen tag - this is a new candidate out.setTag( TAGS::STATUS, STATUS_VALUES::UNSET ); return true; } //...generateFrom int PlanePrimitive::generateFrom( PlanePrimitive & out , Eigen::Matrix<Scalar,3,1> const& normal , Scalar const distanceFromOrigin ) { out.coeffs().segment<3>(3) = normal.normalized(); out.coeffs().segment<3>(0) = Eigen::Matrix<Scalar,3,1>::Zero() - normal * distanceFromOrigin; return EXIT_SUCCESS; } //...generatreFrom() /*! \brief Calculates the length of the plane based on the points in \p cloud, masked by \p indices and the distance from point to plane \p threshold. * * The method calculates the inliers and selects the most far away point from #pos() in both directions. * \tparam _PointT Point wrapper stored in _PointContainerT. * \tparam _PointContainerT Type to store the points to select inliers from. Concept: std::vector<\ref rapter::PointPrimitive>. Depr: pcl::PointCloud< _PointT >::Ptr. * \tparam _IndicesContainerT Concept: std::vector<int>. * \param[out] minMax Output container holding the four endpoints. * \param[in] cloud Point container to look for inlier points in. * \param[in] threshold A point in \p cloud is an inlier, if it is closer than this threshold. 
Usually the "scale". * \param[in] indices Optional input to specify subset of points by indices. * \return EXIT_SUCCESS */ template <typename _PointPrimitiveT, class _IndicesContainerT, typename _PointContainerT> int PlanePrimitive::getExtent( ExtentsT & minMax , _PointContainerT const& cloud , double const threshold /*= 0.01*/ , _IndicesContainerT const* indices_arg /*= NULL*/ , bool const force_axis_aligned /*= false */) const { typedef Eigen::Matrix<Scalar,3,1> Position; if ( this->_extents.isUpdated() ) { minMax = _extents.get(); return 0; } #ifdef RAPTER_USE_PCL std::vector<LidT> inliers; { inliers.reserve( cloud.size() ); const PidT stop_at = indices_arg ? indices_arg->size() : cloud.size(); for ( PidT i = 0; i != stop_at; ++i ) { const PidT pid = indices_arg ? (*indices_arg)[i] : i; if ( this->getDistance( cloud[pid].template pos() ) < threshold ) inliers.push_back( pid ); } //std::cout << "[" << __func__ << "]: " << "indices.size(): " << stop_at << " / " << cloud.size() << std::endl; } // check size if ( !inliers.size() ) return EXIT_FAILURE; // project cloud _PointContainerT on_plane_cloud; on_plane_cloud.reserve( inliers.size() ); for ( PidT pid_id = 0; pid_id != inliers.size(); ++pid_id ) { const PidT pid = inliers[ pid_id ]; on_plane_cloud.push_back( _PointPrimitiveT(this->projectPoint(cloud[pid].template pos()), cloud[pid].template dir()) ); } #if 0 // debug { pcl::PointCloud<pcl::PointXYZRGB>::Ptr c ( new pcl::PointCloud<pcl::PointXYZRGB>() ); pcl::visualization::PCLVisualizer::Ptr vptr( new pcl::visualization::PCLVisualizer("on_plane_cloud") ); vptr->setBackgroundColor( .5, .6, .6 ); for ( PidT pid = 0; pid != on_plane_cloud.size(); ++pid ) { pcl::PointXYZRGB pnt; pnt.getVector3fMap() = on_plane_cloud[pid].template pos(); pnt.r = 255; pnt.g = 0; pnt.b = 0; c->push_back( pnt ); } vptr->addPointCloud( c, "onplane"); vptr->setPointCloudRenderingProperties( pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 3.f, "onplane" ); pcl::PointCloud<pcl::PointXYZRGB>::Ptr c1 ( new pcl::PointCloud<pcl::PointXYZRGB>() ); for ( PidT pid_id = 0; pid_id != inliers.size(); ++pid_id ) { pcl::PointXYZRGB pnt; pnt.getVector3fMap() = cloud[ inliers[pid_id] ].template pos(); pnt.r = 0; pnt.g = 0; pnt.b = 255; c1->push_back( pnt ); } vptr->addPointCloud( c1, "origcloud" ); vptr->setPointCloudRenderingProperties( pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 3.f, "origcloud" ); char plane_name[255]; sprintf( plane_name, "plane%03d", 0 ); vptr->addPlane( *(this->modelCoefficients()), plane_name, 0 ); vptr->spin(); } #endif Eigen::Matrix<Scalar,4,4> frame; // 3 major vectors as columns, and the fourth is the centroid { processing::PCA<_IndicesContainerT>( frame, on_plane_cloud, /* indices: */ NULL ); // no indices needed, already full cloud if ( force_axis_aligned ) { // get the unit axis that is most perpendicular to the 3rd dimension of the frame std::pair<Position,Scalar> dim3( Position::Zero(), Scalar(FLT_MAX) ); { Scalar tmp; if ( (tmp=std::abs(frame.col(2).template head<3>().dot( Position::UnitX() ))) < dim3.second ) { dim3.first = Position::UnitX(); dim3.second = tmp; } if ( (tmp=std::abs(frame.col(2).template head<3>().dot( Position::UnitY() ))) < dim3.second ) { dim3.first = Position::UnitY(); dim3.second = tmp; } if ( (tmp=std::abs(frame.col(2).template head<3>().dot( Position::UnitZ() ))) < dim3.second ) { dim3.first = Position::UnitZ(); dim3.second = tmp; } } frame.col(0).head<3>() = frame.col(2).head<3>().cross( dim3.first ).normalized(); frame.col(1).head<3>() = frame.col(2).head<3>().cross( 
frame.col(0).head<3>() ).normalized();
            }
        }

#if 0
        Scalar step = Scalar(1. * M_PI) / Scalar(180.);
        Scalar limits[2] = { Scalar(0.), M_PI/Scalar(2.) };
        for ( int it = 0; it != 2; ++it, step /= Scalar(5.) )
        {
            std::pair<Scalar,Scalar> min_volume; // <ang, volume>
            min_volume.first  = Scalar(-1.);
            min_volume.second = Scalar(FLT_MAX);
            int i = 0;
            for ( Scalar ang = limits[0]; ang < limits[1]; ang += step, ++i )
            {
                std::cout << "rot by " << ang << ", step: " << step << std::endl;
                // rotated frame
                Eigen::Matrix4f tmp_frame = frame;
                // rotate frame around up_vector by ang
                Eigen::AngleAxisf rot( ang, tmp_frame.block<3,1>(0,2) );
                tmp_frame.block<3,1>(0,0) = rot * tmp_frame.block<3,1>(0,0);
                tmp_frame.block<3,1>(0,1) = rot * tmp_frame.block<3,1>(0,1);

                // calculate volume
                _PointContainerT local_cloud;
                processing::cloud2Local<_PointPrimitiveT,_IndicesContainerT>( local_cloud, frame, on_plane_cloud, /* indices: */ NULL ); // no indices needed, it's already a full cloud
                _PointPrimitiveT min_pt, max_pt;
                processing::getMinMax3D<_IndicesContainerT>( min_pt, max_pt, local_cloud, /* indices: */ NULL );
                Eigen::Vector3f diag = max_pt.template pos() - min_pt.template pos();

                // get location of minimum
                Eigen::MatrixXf::Index minRow, minCol;
                diag.minCoeff( &minRow, &minCol );
                std::cout << "minRow: " << minRow << std::endl;
                float volume = 0.;
                if      ( minRow == 1 ) volume = diag(0) * diag(2);
                else if ( minRow )      volume = diag(0) * diag(1);
                else                    volume = diag(1) * diag(2);

                // select min
                if ( volume < min_volume.second )
                {
                    min_volume.first  = ang;
                    min_volume.second = volume;
                }
            }

            // if ( true )
            //     std::cout << "min_volume.first (angle): " << min_volume.first << " radians "
            //               << min_volume.first * Scalar(180.)/Scalar(M_PI) << " degrees, " << min_volume.second << " volume" << std::endl;

            // apply the selected rotation
            Eigen::AngleAxisf rot( min_volume.first, frame.block<3,1>(0,2) );
            frame.block<3,1>(0,0) = rot * frame.block<3,1>(0,0);
            frame.block<3,1>(0,1) = rot * frame.block<3,1>(0,1);

            // modify lookup around chosen angle for next iteration
            limits[0] = -step/2.f;
            limits[1] = limits[0] + step;
        }
#endif

        _PointContainerT local_cloud;
        processing::cloud2Local<_PointPrimitiveT,_IndicesContainerT>( local_cloud, frame, on_plane_cloud, /* indices: */ NULL ); // no indices needed, it's already a full cloud
        _PointPrimitiveT min_pt, max_pt;
        processing::getMinMax3D<_IndicesContainerT>( min_pt, max_pt, local_cloud, /* indices: */ NULL );

        minMax.resize( 4 );
        minMax[0] = minMax[1] = min_pt.template pos();
        minMax[1](1) = max_pt.template pos()(1);
        minMax[2] = minMax[3] = max_pt.template pos();
        minMax[3](1) = min_pt.template pos()(1);

        for ( int d = 0; d != 4; ++d )
        {
            // to world
            minMax[d] = (frame * (Eigen::Matrix<Scalar,4,1>() << minMax[d], Scalar(1)).finished()).template head<3>();
        }

        this->_extents.update( minMax );

        return EXIT_SUCCESS;
# else
        std::cerr << "[" << __func__ << "]: " << "Needs PCL to work!!!!" << std::endl;
        return EXIT_FAILURE;
# endif //...RAPTER_USE_PCL
    } //...getExtent()

    /*! \brief Calculates size a bit smarter than taking the area of #getExtent().
     *  \tparam MatrixDerived Concept: Eigen::Matrix<_Scalar,-1,1>.
     */
    template <class _IndicesContainerT, typename MatrixDerived, typename _Scalar, class _PointContainerT>
    MatrixDerived& PlanePrimitive::getSpatialSignificance( MatrixDerived          & in
                                                         , _PointContainerT  const& points
                                                         , _Scalar           const  /*scale*/
                                                         , _IndicesContainerT     * indices /*= NULL*/
                                                         , bool              const  return_squared /*= false */) const
    {
#warning "[PlanePrimitive::getSpatialSignificance] TODO: Project onto normals."
        _IndicesContainerT tmp_population, *pop = &tmp_population;
        if ( !indices )
            processing::getPopulationOf( tmp_population, this->getTag(TAGS::GID), points );
        else
            pop = indices;

        if ( !(pop->size()) )
        {
            std::cerr << "[" << __func__ << "]: " << "_____________NO points in primitive!!!!_____________" << std::endl;
            in.setConstant( _Scalar(-1.) );
            return in; // bail out here, otherwise the -1 error marker would immediately be overwritten below
        }

        in.setConstant( _Scalar(0.) );
#if 1
        // biggest eigen value
        Eigen::Matrix<_Scalar,3,1> eigen_values;
        Eigen::Matrix<_Scalar,3,3> eigen_vectors;
        processing::eigenDecomposition( eigen_values, eigen_vectors, points, pop );
        if ( return_squared )
            in(0) = eigen_values( 0 );
        else
            in(0) = std::sqrt( eigen_values(0) );
#else
        // variance
        Eigen::Matrix<_Scalar,3,1> centroid = processing::getCentroid<_Scalar>( points, &population );
        for ( size_t pid_id = 0; pid_id != population.size(); ++pid_id )
        {
            const int pid = population[pid_id];
            std::cout << "[" << __func__ << "]: " << "\tadding "
                         "points[" << pid << "].pos() (" << points[pid].template pos().transpose()
                      << " - " << centroid.transpose() << ").squaredNorm(): "
                      << (points[pid].template pos() - centroid).squaredNorm() << "\n";
            in(0) += (points[pid].template pos() - centroid).squaredNorm();
            std::cout << "\tin0 is now " << in(0) << std::endl;
        }
        in(0) /= _Scalar( population.size() );
#endif
        return in;
    } //...getSpatialSignificance()

    /*! \brief Draws the Plane from extrema
     *  \tparam PointsT Concept: std::vector<pcl::PointXYZ>.
     */
    template <class PointsT>
    int PlanePrimitive::draw( PointsT const& corners
                            , pcl::visualization::PCLVisualizer::Ptr v
                            , std::string const plane_name
                            , double const r, double const g, double const b
                            , int const viewport_id /* = 0 */ )
    {
# ifdef RAPTER_USE_PCL
        if ( corners.size() != 4 )
        {
            std::cerr << "[" << __func__ << "]: " << "...cannot draw, need exactly 4 corners" << std::endl;
            return EXIT_FAILURE;
        }

        Eigen::Vector3f centroid( Eigen::Vector3f::Zero() );
        for ( size_t corner_id = 0; corner_id != corners.size(); ++corner_id )
        {
            centroid += corners[corner_id].getVector3fMap();
        }
        centroid /= static_cast<float>(corners.size());

        // polygon containing the four corners spanning the plane's face
        pcl::PointCloud<pcl::PointXYZ>::Ptr plane_polygon_cloud_ptr( new pcl::PointCloud<pcl::PointXYZ> );
        //char title[255];
        for ( int corner_id = 0; corner_id < corners.size(); ++corner_id )
        {
            plane_polygon_cloud_ptr->push_back( corners[corner_id] );
        }

        // #pragma omp critical PCLVIS
        {
            // draw plane polygon
            v->addPolygon<pcl::PointXYZ>( plane_polygon_cloud_ptr, r,g,b, plane_name, viewport_id );
            // v->setShapeRenderingProperties( pcl::visualization::PCL_VISUALIZER_OPACITY, .9, plane_name);
            v->setShapeRenderingProperties( pcl::visualization::PCL_VISUALIZER_REPRESENTATION, pcl::visualization::PCL_VISUALIZER_REPRESENTATION_SURFACE, plane_name );
        }

        // show normal
        /*
        if ( drawNormal )
        {
            v->addArrow( am::util::pcl::asPointXYZ( centroid + plane.Normal() * .2f ), // end point
                         am::util::pcl::asPointXYZ( centroid ),                        // start point
                         r*3., g*3., b*3.,
                         false,                                                        // show length
                         std::string(title) + "_normal"                                // cloud id
                         , viewport_id );
        }

        // draw coordinate system
        if ( drawFrame )
        {
            for ( int axis_id = 0; axis_id < 3; ++axis_id )
            {
                char arrow_name[1024];
                sprintf( arrow_name, "_arrow%d", axis_id );
                v->addArrow( am::util::pcl::asPointXYZ( plane.Frame().block<3,1>(0,3) + plane.Frame().block<3,1>(0,axis_id).normalized() *.2f ), // end point
                             am::util::pcl::asPointXYZ( plane.Frame().block<3,1>(0,3) ),                                                        // start point
                             (2-axis_id  )%3 *.45 + .1, // r
                             (2-axis_id+1)%3 *.45 + .1, // g
                             (2-axis_id+2)%3 *.45 + .1, // b
                             false,                     // show length
                             std::string(title) + arrow_name // cloud id
                             , viewport_id );
                //v->setShapeRenderingProperties( pcl::visualization::PCL_VISUALIZER_, 3., std::string(title) + arrow_name );
            }
        }
        */
        return EXIT_SUCCESS;
# else
        return EXIT_FAILURE;
# endif // RAPTER_USE_PCL
    } //...draw()

    /*! \brief Draws plane.
     *  \tparam _PointPrimitiveT   Concept: \ref rapter::PointPrimitive.
     *  \tparam _PointContainerT   Concept: std::vector< _PointPrimitiveT >.
     *  \tparam _IndicesContainerT Concept: std::vector<int>.
     */
    template <class _PointPrimitiveT, class _PointContainerT, class _IndicesContainerT>
    int PlanePrimitive::draw( PlanePrimitive const& plane
                            , _PointContainerT const& cloud
                            , Scalar const radius
                            , _IndicesContainerT const* indices
                            , pcl::visualization::PCLVisualizer::Ptr v
                            , std::string const& plane_name
                            , double const r, double const g, double const b
                            , int const viewport_id /* = 0*/
                            , Scalar const stretch /* = Scalar( 1. ) */
                            , int const draw_mode /* = 0*/
                            , Scalar const alpha /* = 2.*/ )
    {
        int err = EXIT_SUCCESS;
        //if ( stretch != Scalar(1.) )
        //    std::cerr << "[" << __func__ << "]: " << "WARNING, Stretch for planes is unimplemented!!!" << std::endl;

        typedef Eigen::Matrix<Scalar,3,1> Position;
        std::vector<Position> minMax;
        int it = 0;
        int max_it = 10;
        Scalar tmp_radius = radius;
        do
        {
            err = plane.getExtent<_PointPrimitiveT>( minMax
                                                   , cloud
                                                   , tmp_radius
                                                   , indices
                                                   , /* force_axis_aligned: */ (draw_mode == 1) /*true*/ );
            tmp_radius *= 2.f;
        } while ( (minMax.size() < 2) && (++it < max_it) );

        // if error, or we couldn't find a scale that was big enough to find "inliers"
        if ( (EXIT_SUCCESS != err) || (it >= max_it) )
        {
            std::cerr << "[" << __func__ << "]: " << "plane.getExtent exceeded max radius increase iteration count...drawing unit " << plane.toString() << std::endl;
            v->addPlane( *plane.modelCoefficients(), plane.pos()(0), plane.pos()(1), plane.pos()(2), plane_name, 0 );
        }
        else
        {
            std::vector<pcl::PointXYZ> ps;
            for ( int i = 0; i != minMax.size(); ++i )
            {
                pcl::PointXYZ pnt;
                pnt.x = minMax[i](0); pnt.y = minMax[i](1); pnt.z = minMax[i](2);
                ps.push_back( pnt );
            }

            if ( draw_mode <= 1 ) // 0: classic, 1: classic axis_aligned, 2: qhull
                err += draw( ps, v, plane_name, r, g, b, viewport_id );
            else if ( draw_mode == 2 )
                err += drawConvex( v, plane, cloud, indices, plane_name, r, g, b, viewport_id, alpha );
            else
            {
                std::cerr << "can't recognize draw mode " << draw_mode << std::endl;
                err = EXIT_FAILURE;
            }
        }

        //v->setShapeRenderingProperties( pcl::visualization::PCL_VISUALIZER_SHADING, pcl::visualization::PCL_VISUALIZER_SHADING_FLAT, plane_name );
        return err;
    } //...draw()

    /*! \brief Extract convex hull and display
     *  \tparam PclCloudT Concept: pcl::PointCloud<pcl::PointXYZ>::Ptr plane_polygon_cloud_ptr( new pcl::PointCloud<pcl::PointXYZ> ).
     *  \tparam PointsT   Concept: std::vector<pcl::PointXYZ>.
*/ template < class PclCloudT, class _PointContainerT, class _IndicesContainerT> int PlanePrimitive::getHull( PclCloudT & plane_polygon_cloud , PlanePrimitive const& plane , _PointContainerT const& points , _IndicesContainerT const* indices , float const alpha /* = 2.f */ , pcl::PolygonMesh * out_mesh /* = NULL */ ) { typedef typename PclCloudT::PointType PclPointT; pcl::ConcaveHull<PclPointT> concave_hull; // object //typename pcl::PointCloud<PclPointT>::Ptr cloud_hull( new pcl::PointCloud<PclPointT>() ); typename pcl::PointCloud<PclPointT> cloud_hull; typename pcl::PointCloud<PclPointT>::Ptr cloud_projected( new typename pcl::PointCloud<PclPointT>() ); std::vector<pcl::Vertices> polygons; // output list indexing the points from cloud_hull, in 2D this is size 1 //pcl::PointCloud<PclPointT>::Ptr plane_polygon_cloud_ptr( new pcl::PointCloud<PclPointT> ); cloud_projected->resize(indices->size()); // get assigned points, project them to the plane and store as PCL cloud PidT i = 0; for ( typename _IndicesContainerT::const_iterator it = indices->begin(); it != indices->end(); ++i, ++it ) { cloud_projected->at(i).getVector3fMap() = plane.projectPoint(points[*it].pos()).template cast<float>(); } concave_hull.setAlpha( alpha ); concave_hull.setInputCloud( cloud_projected ); concave_hull.reconstruct( cloud_hull, polygons ); if ( polygons.size() ) { PidT max_size = 0, max_id = 0; for ( i = 0; i != polygons.size(); ++i ) { if ( polygons[i].vertices.size() >max_size) { max_size = polygons[i].vertices.size(); max_id = i; } } plane_polygon_cloud.resize( polygons[max_id].vertices.size() ); i = 0; for ( std::vector<uint32_t>::const_iterator it = polygons[max_id].vertices.begin(); it != polygons[max_id].vertices.end(); ++i, ++it ) { plane_polygon_cloud.at( i ) = cloud_hull.at(*it); } if ( out_mesh ) { // Perform reconstruction out_mesh->polygons = polygons; // Convert the PointCloud into a PCLPointCloud2 pcl::toPCLPointCloud2 (cloud_hull, out_mesh->cloud); } } else std::cout << "no hull" << std::endl; return plane_polygon_cloud.size(); } /*! \brief Extract convec hull and display * \tparam PointsT Concept: std::vector<pcl::PointXYZ>. */ template <class _PointContainerT, class _IndicesContainerT> int PlanePrimitive::drawConvex( pcl::visualization::PCLVisualizer::Ptr v , PlanePrimitive const& plane , _PointContainerT const& cloud , _IndicesContainerT const* indices , std::string const plane_name , double const r , double const g , double const b , int const viewport_id /* = 0 */ , float const alpha /* = 2.f */ ) { pcl::PointCloud<pcl::PointXYZ>::Ptr plane_polygon_cloud_ptr( new pcl::PointCloud<pcl::PointXYZ> ); if ( getHull(*plane_polygon_cloud_ptr, plane, cloud, indices, alpha) ) { // #pragma omp critical PCLVIS { // draw plane polygon v->addPolygon<pcl::PointXYZ>( plane_polygon_cloud_ptr, r,g,b, plane_name, viewport_id ); // v->setShapeRenderingProperties( pcl::visualization::PCL_VISUALIZER_OPACITY, .9, plane_name); v->setShapeRenderingProperties( pcl::visualization::PCL_VISUALIZER_REPRESENTATION, pcl::visualization::PCL_VISUALIZER_REPRESENTATION_SURFACE, plane_name ); } } else { std::cerr << "[" << __func__ << "]: " << "qhull returned 0 points for plane " << plane.toString() << "..." << std::endl; return EXIT_FAILURE; } return EXIT_SUCCESS; } inline int PlanePrimitive::to4Coeffs( std::vector<Scalar> &coeffs ) const { coeffs.resize( 4, Scalar(0.) ); std::copy( _coeffs.data()+3, _coeffs.data()+6, coeffs.begin() ); // calculate distance from origin coeffs[3] = Scalar(-1.) 
* this->normal().dot( this->pos() ); return EXIT_SUCCESS; } # ifdef RAPTER_USE_PCL inline pcl::ModelCoefficients::Ptr PlanePrimitive::modelCoefficients() const { pcl::ModelCoefficients::Ptr model_coeffs( new pcl::ModelCoefficients() ); model_coeffs->values.resize(Dim); // copy normal std::copy( _coeffs.data()+3, _coeffs.data()+6, model_coeffs->values.begin() ); // calculate distance from origin model_coeffs->values[3] = Scalar(-1.) * this->normal().dot( this->pos() ); if ( model_coeffs->values[3] != this->getDistance(Eigen::Matrix<Scalar,3,1>::Zero()) ) std::cout << "these should be similar: " << this->getDistance(Eigen::Matrix<Scalar,3,1>::Zero()) << " , " << model_coeffs->values[3] << std::endl; return model_coeffs; } # endif // RAPTER_USE_PCL inline bool PlanePrimitive::gidUnset() const { return this->getTag(PlanePrimitive::TAGS::GID) == LONG_VALUES::UNSET; } } //...ns rapter #endif // __RAPTER_INC_PLANEPRIMITIVE_HPP__ #endif #endif // __RAPTER_PLANEPRIMITIVE_H__
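The header above stores a plane as a position (coeffs 0-2) plus a unit normal (coeffs 3-5), and to4Coeffs()/modelCoefficients() convert that to the 4-coefficient Hessian form n.x + d = 0 with d = -n.p0. A minimal standalone sketch of that arithmetic with plain Eigen; Plane6, toHessian and signedDistance are illustrative names, not part of the library:

#include <Eigen/Dense>
#include <iostream>

// Position + unit-normal representation, mirroring PlanePrimitive's 6-vector.
struct Plane6 {
    Eigen::Vector3f pos;    // point on the plane (coeffs 0-2)
    Eigen::Vector3f normal; // unit normal (coeffs 3-5)
};

// Hessian form n.x + d = 0, so d = -n.p0 (what to4Coeffs() computes).
Eigen::Vector4f toHessian(const Plane6& pl) {
    Eigen::Vector4f c;
    c.head<3>() = pl.normal;
    c(3) = -pl.normal.dot(pl.pos);
    return c;
}

// Signed point-to-plane distance, matching getDistance(): (x - p0).n
float signedDistance(const Plane6& pl, const Eigen::Vector3f& x) {
    return (x - pl.pos).dot(pl.normal);
}

int main() {
    Plane6 pl;
    pl.pos    = Eigen::Vector3f(0.f, 0.f, 1.f);
    pl.normal = Eigen::Vector3f(0.f, 0.f, 1.f);
    std::cout << toHessian(pl).transpose() << "\n";                          // 0 0 1 -1
    std::cout << signedDistance(pl, Eigen::Vector3f(2.f, 3.f, 4.f)) << "\n"; // 3
}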
mc_Old_NODiscDist.c
/**
 * mc_Old_NODiscDist.c
 * Authors: Yizhao Gao <yizhaotsccsj@gmail.com>
 * Date: {08/01/2017}
 */

#include <stdio.h>
#include <stdlib.h>
#include <random>
#include <omp.h>

using namespace std;

void simulateCases(double * preInten, int * simCases, int locCount, int casCount)
{
	// NOTE: the distribution is static, so its upper bound is fixed by the
	// first call's preInten[locCount - 1]; later calls with different data
	// would silently reuse the stale bound.
	static std::random_device rd;
	static std::mt19937 rng(rd());
	static std::uniform_real_distribution<double> uni(0, preInten[locCount - 1]); //[a, b)

	for(int i = 0; i < locCount; i++)
	{
		simCases[i] = 0;
	}

	double randomNumber;
	for(int i = 0; i < casCount; i++)
	{
		randomNumber = uni(rng);
		// Linear scan over the cumulative intensities to find the sampled location.
		for(int j = 0; j < locCount; j++)
		{
			if(randomNumber < preInten[j])
			{
				simCases[j]++;
				break;
			}
		}
	}
}

int * monteCarlo(double * x, double * y, double * intensity, int locCount, int casCount, int * clusterCase, int * centerID, double * cRadius, bool * highCluster, int nClusters, int nSim)
{
	int * nExtreme;
	if(NULL == (nExtreme = (int *) malloc (nClusters * sizeof(int))))
	{
		printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__);
		exit(1);
	}
	for(int i = 0; i < nClusters; i++)
	{
		nExtreme[i] = 0;
	}

	double * preInten; // Cumulative intensity up to and including each location, for simulation purposes
	int * simCass;
	if(NULL == (preInten = (double *) malloc (locCount * sizeof(double))))
	{
		printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__);
		exit(1);
	}
	preInten[0] = intensity[0];
	for(int i = 1; i < locCount; i++)
	{
		preInten[i] = preInten[i-1] + intensity[i];
	}
	if(NULL == (simCass = (int *) malloc (locCount * sizeof(int))))
	{
		printf("ERROR: Out of memory at line %d in file %s\n", __LINE__, __FILE__);
		exit(1);
	}

	for(int i = 0; i < nSim; i++)
	{
		simulateCases(preInten, simCass, locCount, casCount);

#pragma omp parallel for
		for(int j = 0; j < nClusters; j++)
		{
			double xC = x[centerID[j]];
			double yC = y[centerID[j]];
			double rad2 = cRadius[j] * cRadius[j];

			int simCasInc = 0;
			for(int k = 0; k < locCount; k++)
			{
				if((x[k] - xC) * (x[k] - xC) + (y[k] - yC) * (y[k] - yC) <= rad2)
				{
					simCasInc += simCass[k];
				}
			}

			if(highCluster[j] && simCasInc >= clusterCase[j])
				nExtreme[j]++;
			else if(!highCluster[j] && simCasInc <= clusterCase[j])
				nExtreme[j]++;
		}
	}

	free(preInten);
	free(simCass);

	return nExtreme;
}
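Since preInten is a non-decreasing prefix-sum array, the per-case linear scan in simulateCases() can be replaced by a binary search. A sketch of the standard std::upper_bound approach, not part of the original file; sampleLocation is an illustrative name:

#include <algorithm>

// Drop-in alternative for the inner loop of simulateCases(): finds the first
// index j with randomNumber < preInten[j] in O(log locCount) instead of O(locCount).
// Requires preInten to be non-decreasing (it is: a prefix sum of non-negative intensities).
inline int sampleLocation(const double * preInten, int locCount, double randomNumber)
{
	// upper_bound returns the first element strictly greater than randomNumber,
	// which matches the `randomNumber < preInten[j]` test of the linear scan.
	const double * it = std::upper_bound(preInten, preInten + locCount, randomNumber);
	return (int)(it - preInten); // in [0, locCount); locCount cannot occur because uni() < preInten[locCount-1]
}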
math_array.h
// -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#ifndef CORE_CONTAINER_MATH_ARRAY_H_
#define CORE_CONTAINER_MATH_ARRAY_H_

#include <algorithm>
#include <cassert>
#include <cmath>
#include <numeric>
#include <ostream>
#include <stdexcept>
#include <utility>

#include "core/util/root.h"

namespace bdm {

/// Array with a fixed number of elements. It implements the same behaviour
/// as the standard `std::array<T, N>` container. However, it also provides
/// several custom mathematical operations (e.g. Sum(), Norm() etc.).
template <class T, std::size_t N>
class MathArray {  // NOLINT
 public:
  /// Default constructor
  MathArray() {
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      data_[i] = T();
    }
  }

  /// Constructor which accepts an std::initializer_list to set
  /// the array's content.
  /// \param l an initializer list
  constexpr MathArray(std::initializer_list<T> l) {
    assert(l.size() == N);
    auto it = l.begin();
    for (uint64_t i = 0; i < N; i++) {
      data_[i] = *(it++);
    }
  }

  /// Return a pointer to the underlying data.
  /// \return const T pointer to the first entry of the array.
  inline const T* data() const { return &data_[0]; }  // NOLINT

  /// Return the size of the array.
  /// \return integer denoting the array's size.
  inline const size_t size() const { return N; }  // NOLINT

  /// Check if the array is empty.
  /// \return true if size() == 0, false otherwise.
  inline const bool empty() const { return N == 0; }  // NOLINT

  /// Overloaded array subscript operator. It does not perform
  /// any boundary checks.
  /// \param idx element's index.
  /// \return the requested element.
  T& operator[](size_t idx) { return data_[idx]; }

  /// Const overloaded array subscript operator.
  /// \param idx element's index.
  /// \return the requested element.
  const T& operator[](size_t idx) const { return data_[idx]; }

  /// Returns the element at the given position. It will throw
  /// an std::out_of_range exception if the given index is out
  /// of the array's boundaries.
  /// \param idx the index of the element.
  /// \return the requested element.
  T& at(size_t idx) noexcept(false) {  // NOLINT
    // idx is unsigned, so only the upper bound needs checking; >= rejects
    // at(N), which the old `idx > size() || idx < 0` check let through.
    if (idx >= size()) {
      throw std::out_of_range("The index is out of range");
    }
    return data_[idx];
  }

  const T* begin() const { return &(data_[0]); }  // NOLINT
  const T* end() const { return &(data_[N]); }    // NOLINT
  T* begin() { return &(data_[0]); }              // NOLINT
  T* end() { return &(data_[N]); }                // NOLINT

  /// Returns the element at the beginning of the array.
  /// \return first element.
  T& front() { return *(this->begin()); }  // NOLINT

  /// Return the element at the end of the array.
  /// \return last element.
  T& back() {  // NOLINT
    auto tmp = this->end();
    tmp--;
    return *tmp;
  }

  /// Assignment operator.
  /// \param other the other MathArray instance.
  /// \return the current MathArray.
  MathArray& operator=(const MathArray& other) {
    if (this != &other) {
      assert(other.size() == N);
      std::copy(other.data_, other.data_ + other.size(), data_);
    }
    return *this;
  }

  /// Equality operator.
  /// \param other a MathArray instance.
  /// \return true if they have the same content, false otherwise.
  bool operator==(const MathArray& other) const {
    if (other.size() != N) {
      return false;
    }
    for (size_t i = 0; i < N; i++) {
      if (other[i] != data_[i]) {
        return false;
      }
    }
    return true;
  }

  MathArray& operator++() {
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      ++data_[i];
    }
    return *this;
  }

  MathArray operator++(int) {
    MathArray tmp(*this);
    operator++();
    return tmp;
  }

  MathArray& operator--() {
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      --data_[i];
    }
    return *this;
  }

  MathArray operator--(int) {
    MathArray tmp(*this);
    operator--();
    return tmp;
  }

  MathArray& operator+=(const MathArray& rhs) {
    assert(N == rhs.size());
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      data_[i] += rhs[i];
    }
    return *this;
  }

  MathArray operator+(const MathArray& rhs) {
    assert(size() == rhs.size());
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] + rhs[i];
    }
    return tmp;
  }

  const MathArray operator+(const MathArray& rhs) const {
    assert(size() == rhs.size());
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] + rhs[i];
    }
    return tmp;
  }

  MathArray& operator+=(const T& rhs) {
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      data_[i] += rhs;
    }
    return *this;
  }

  MathArray operator+(const T& rhs) {
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] + rhs;
    }
    return tmp;
  }

  MathArray& operator-=(const MathArray& rhs) {
    assert(size() == rhs.size());
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      data_[i] -= rhs[i];
    }
    return *this;
  }

  MathArray operator-(const MathArray& rhs) {
    assert(size() == rhs.size());
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] - rhs[i];
    }
    return tmp;
  }

  const MathArray operator-(const MathArray& rhs) const {
    assert(size() == rhs.size());
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] - rhs[i];
    }
    return tmp;
  }

  MathArray& operator-=(const T& rhs) {
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      data_[i] -= rhs;
    }
    return *this;
  }

  MathArray operator-(const T& rhs) {
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] - rhs;
    }
    return tmp;
  }

  T& operator*=(const MathArray& rhs) = delete;

  /// Dot product with another array of the same size.
  T operator*(const MathArray& rhs) {
    assert(size() == rhs.size());
    T result = 0;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      result += data_[i] * rhs[i];
    }
    return result;
  }

  const T operator*(const MathArray& rhs) const {
    assert(size() == rhs.size());
    T result = 0;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      result += data_[i] * rhs[i];
    }
    return result;
  }

  MathArray& operator*=(const T& k) {
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      data_[i] *= k;
    }
    return *this;
  }

  MathArray operator*(const T& k) {
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] * k;
    }
    return tmp;
  }

  const MathArray operator*(const T& k) const {
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] * k;
    }
    return tmp;
  }

  MathArray& operator/=(const T& k) {
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      data_[i] /= k;
    }
    return *this;
  }

  MathArray operator/(const T& k) {
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      tmp[i] = data_[i] / k;
    }
    return tmp;
  }

  /// Fill the MathArray with a constant value.
  /// \param k the constant value
  /// \return the array
  MathArray& fill(const T& k) {  // NOLINT
    std::fill(std::begin(data_), std::end(data_), k);
    return *this;
  }

  /// Return the sum of all the array's elements.
  /// \return sum of the array's content.
  T Sum() const { return std::accumulate(begin(), end(), T(0)); }  // T(0), not 0: an int init would truncate floating-point sums

  /// Compute the norm of the array's content.
  /// Note: returns 1 (not 0) for the zero vector, so that Normalize()
  /// never divides by zero.
  /// \return array's norm.
  T Norm() const {
    T result = 0;
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      result += data_[i] * data_[i];
    }
    result = std::sqrt(result);
    return result == 0 ? 1.0 : result;
  }

  /// Normalize the array. It will be done in-place.
  /// \return the normalized array.
  MathArray& Normalize() {
    T norm = Norm();
#pragma omp simd
    for (size_t i = 0; i < N; i++) {
      data_[i] /= norm;
    }
    return *this;
  }

  /// Compute the entry wise product given another array
  /// of the same size.
  /// \param rhs the other array
  /// \return a new array with the result
  MathArray EntryWiseProduct(const MathArray& rhs) {
    assert(rhs.size() == N);
    MathArray tmp;
#pragma omp simd
    for (size_t i = 0; i < N; ++i) {
      tmp[i] = data_[i] * rhs[i];
    }
    return tmp;
  }

 private:
  T data_[N];
  BDM_CLASS_DEF_NV(MathArray, 1);  // NOLINT
};

template <class T, std::size_t N>
std::ostream& operator<<(std::ostream& o, const MathArray<T, N>& arr) {
  for (size_t i = 0; i < N; i++) {
    o << arr[i];
    if (i != N - 1) {
      o << ", ";
    }
  }
  return o;
}

/// Alias for a size 3 MathArray
using Double3 = MathArray<double, 3>;

/// Alias for a size 4 MathArray
using Double4 = MathArray<double, 4>;

}  // namespace bdm

#endif  // CORE_CONTAINER_MATH_ARRAY_H_
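A quick usage sketch of the container above, assuming the header lives at the path implied by its include guard; shows element-wise arithmetic, the dot-product operator*, and Norm()/Normalize():

#include <iostream>
#include "core/container/math_array.h"

int main() {
  bdm::Double3 a = {1.0, 2.0, 2.0};
  bdm::Double3 b = {0.5, 0.5, 0.5};

  bdm::Double3 sum = a + b;  // element-wise: {1.5, 2.5, 2.5}
  double dot = a * b;        // dot product: 2.5
  double norm = a.Norm();    // sqrt(1 + 4 + 4) = 3
  a.Normalize();             // in-place: {1/3, 2/3, 2/3}

  std::cout << sum << " | " << dot << " | " << norm << "\n";
  std::cout << a << "\n";
  return 0;
}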
lastpass_fmt_plug.c
/* LastPass offline cracker patch for JtR. Hacked together during January of 2013 by * Dhiru Kholia <dhiru.kholia at gmail.com>. * * All the hard work was done by Milen (author of hashkill). * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_lastpass; #elif FMT_REGISTERS_H john_register_one(&fmt_lastpass); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "johnswap.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include <openssl/aes.h> #include "pbkdf2_hmac_sha256.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "lp" #define FORMAT_NAME "LastPass offline" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 16 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests lastpass_tests[] = { {"$lp$hackme@mailinator.com$6f5d8cec3615fc9ac7ba2e0569bce4f5", "strongpassword"}, {"$lp$3$27c8641d7f5ab5985569d9d0b499b467", "123"}, {"$lp$ninechars$d09153108a89347da5c97a4a18f91345", "PassWord"}, {"$lp$anicocls$764b0f54528eb4a4c93aab1b18af28a5", ""}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)]; static struct custom_salt { int iterations; int salt_length; unsigned char salt[32]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; if (strncmp(ciphertext, "$lp$", 4)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 4; if ((p = strtokm(ctcopy, "$")) == NULL) /* email */ goto err; if (strlen(p) > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* hash */ goto err; if (hexlenl(p) != 32) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 4; /* skip over "$lp$" */ p = strtokm(ctcopy, "$"); strncpy((char*)cs.salt, p, 32); cs.salt_length = strlen((char*)p); MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < 
BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { AES_KEY akey; #ifdef SIMD_COEF_32 int lens[MAX_KEYS_PER_CRYPT], i; unsigned char *pin[MAX_KEYS_PER_CRYPT]; ARCH_WORD_32 key[MAX_KEYS_PER_CRYPT][8]; union { ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT]; unsigned char *poutc; } x; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = strlen(saved_key[i+index]); pin[i] = (unsigned char*)saved_key[i+index]; x.pout[i] = key[i]; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length, 500, &(x.poutc), 32, 0); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { memset(&akey, 0, sizeof(AES_KEY)); AES_set_encrypt_key((unsigned char*)key[i], 256, &akey); AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02", (unsigned char*)crypt_out[i+index], &akey, AES_ENCRYPT); } #else unsigned char key[32]; pbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->salt_length, 500, key, 32, 0); memset(&akey, 0, sizeof(AES_KEY)); AES_set_encrypt_key((unsigned char*)key, 256, &akey); AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02", (unsigned char*)crypt_out[index], &akey, AES_ENCRYPT); #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void lastpass_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_lastpass = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, lastpass_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, lastpass_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
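For reference, the check that crypt_all() above performs for each candidate, reduced to a single password: derive a 256-bit key with PBKDF2-HMAC-SHA256 over the e-mail salt at 500 iterations, AES-256-ECB-encrypt the fixed 16-byte block "lastpass rocks\x02\x02", and compare the result against the stored 16-byte binary. A standalone sketch using OpenSSL's PKCS5_PBKDF2_HMAC in place of the plugin's (possibly SIMD) pbkdf2_hmac_sha256 helper; lastpass_hash is an illustrative name:

#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/aes.h>

/* key = PBKDF2-HMAC-SHA256(password, email_salt, 500 iterations, 32 bytes)
 * out = AES-256-ECB(key, "lastpass rocks\x02\x02"); callers compare out
 * against the 16 hash bytes parsed from the "$lp$..." ciphertext. */
static void lastpass_hash(const char *password, const char *salt,
                          unsigned char out[16])
{
	unsigned char key[32];
	AES_KEY akey;

	PKCS5_PBKDF2_HMAC(password, strlen(password),
	                  (const unsigned char *)salt, strlen(salt),
	                  500, EVP_sha256(), sizeof(key), key);

	AES_set_encrypt_key(key, 256, &akey);
	AES_ecb_encrypt((const unsigned char *)"lastpass rocks\x02\x02", out,
	                &akey, AES_ENCRYPT);
}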
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <functional> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class 
FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. 
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute.
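// A usage sketch for the PragmaStack template above (hypothetical values;
// not code from this header). Entering and leaving a C++ method body via
// the sentinel mechanism behaves like:
//
//   PragmaStack<unsigned> PackStack(/*Default=*/8);
//   PackStack.SentinelAction(PSK_Push, "InternalPragmaSlot"); // method entry
//   PackStack.Act(Loc, PSK_Set, "", 4);    // #pragma pack(4) inside the body
//   PackStack.SentinelAction(PSK_Pop, "InternalPragmaSlot");  // method exit
//   // CurrentValue is 8 again, matching the MSVC behavior described above.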
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit. /// /// This list contains class members, and locations of delete-expressions, /// for which we could not prove whether they mismatch the new-expression /// used in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions.
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. 
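// Sketch of the intended push/pop discipline (an illustration, not original
// code; the pool-chaining constructor is assumed from DelayedDiagnostic.h):
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   ParsingDeclState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse one declaration; availability/access diags collect in Pool
//   S.DelayedDiagnostics.popWithoutEmitting(State);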
bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. 
May alias another /// identifier, declared or undeclared. llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to look up file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits>. ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type.
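// For orientation (editor's sketch, not part of the original comments): the
// cached decls above back the Objective-C literal forms roughly as follows:
//
//   @42        -> [NSNumber numberWithInt:42]     (NSNumberLiteralMethods)
//   @"abc"     -> NSString literal                (NSStringDecl)
//   @[a, b]    -> arrayWithObjects:count:         (ArrayWithObjectsMethod)
//   @{k : v}   -> dictionaryWithObjects:forKeys:count:
//                                                 (DictionaryWithObjectsMethod)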
QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. 
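// As a quick illustration of the enum above (not part of the original
// comments), typical expressions land in these contexts roughly as follows:
//
//   sizeof(e)                      -> Unevaluated
//   braced-init-list inside sizeof -> UnevaluatedList
//   case e:                        -> ConstantEvaluated
//   an ordinary executed expr      -> PotentiallyEvaluated
//   a default argument expr        -> PotentiallyEvaluatedIfUsed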
/// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records.
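// Editor's sketch of the packing described above: the two spare low bits of
// the aligned CXXMethodDecl* carry the Kind, so a full resolution result
// fits in a single pointer-sized cache entry:
//
//   SpecialMemberOverloadResult R(MD);  // MD: some CXXMethodDecl*
//   R.getKind();   // NoMemberOrDeleted if MD->isDeleted(), otherwise Success
//   R.getMethod(); // the pointer itself, untouched by the tag bits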
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. 
If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics.
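// Typical use (an illustrative sketch; the diagnostic ID and operands here
// are hypothetical, not from this header):
//
//   SemaRef.Diag(Loc, diag::err_example_mismatch) << SomeQualType << SrcRange;
//
// The temporary builder emits on destruction, appending the template
// instantiation stack when the diagnostic arises inside an instantiation.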
SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit.
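// Sketch of a translation unit exercising all three fragment kinds (example
// source for illustration, not part of this header):
//
//   module;             // begin global module fragment   -> Global
//   #include <header>
//   export module M;    // begin module interface         -> Normal
//   export int f();
//   module :private;    // begin private module fragment  -> Private
//   int f() { return 42; }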
Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
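// A minimal sketch of using BuildFunctionType above (hypothetical Sema
// reference 'S' and location 'Loc'; not code from this header):
//
//   QualType Params[] = {S.Context.FloatTy, S.Context.DoubleTy};
//   FunctionProtoType::ExtProtoInfo EPI;
//   QualType Fn = S.BuildFunctionType(S.Context.IntTy, Params, Loc,
//                                     DeclarationName(), EPI);
//   // Fn is 'int (float, double)' if no errors were diagnosed.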
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal swift_name /// attribute for the decl \p D. Raise a diagnostic if the name is invalid /// for the given declaration. /// /// For a function, this will validate a compound Swift name, /// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, /// and the function will output the number of parameter names, and whether /// this is a single-arg initializer. /// /// For a type, enum constant, property, or variable declaration, this will /// validate either a simple identifier, or a qualified /// <code>context.identifier</code> name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc, IdentifierInfo *AttrName); private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking whether their /// address is eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition.
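// Editor's sketch of the modules scenario behind these visibility
// predicates (example source, not part of this header):
//
//   // module A (interface):  export struct S { int x; };
//   // an importing TU that has not written 'import A;':
//   S s;   // S's definition exists but is not visible here; the visible-
//          // definition query fails, and Sema can suggest importing the
//          // module that owns the definition.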
If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case 
NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
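// Illustrative case (editor's sketch): for 'x.foo < T > (args)' where 'foo'
// resolves to a MemberExpr written without explicit template arguments, this
// returns true, letting the caller suggest that 'foo' was meant as a
// template-name instead of parsing '<' as a comparison.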
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments.
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. 
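/// As an illustrative sketch (not from the original source), the syntax in
/// question is:
/// \code
///   module;              // global module fragment begins
///   #include <cstddef>   // declarations attach to the global module
///   export module M;     // the module interface unit proper begins
/// \endcode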
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". 
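/// As an illustrative sketch (not from the original source), the attribute in
/// question is spelled:
/// \code
///   struct [[clang::trivial_abi]] S { ~S(); };  // may be passed in registers
/// \endcode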
TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
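/// As an illustrative sketch (not from the original source), for
/// \code
///   enum { kFirst = 0, kSecond };
/// \endcode
/// \c II would name \c kFirst.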
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must look up names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, SourceRange Range, StringRef Name, bool Override, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void
MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType 
Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). 
virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo
*ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. 
/// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop.
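// As a rough sketch (not from the original source), the calls being built
// follow the standard desugaring:
//   for (auto x : range) body;
// behaves approximately like:
//   { auto &&__range = range;
//     auto __begin = begin-expr;   // BuildForRangeBeginEndCall resolves these
//     auto __end = end-expr;
//     for (; __begin != __end; ++__begin) { auto x = *__begin; body; } }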
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. 
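/// As a small illustration (not from the original source), C keeps tags and
/// ordinary names in separate name spaces:
/// \code
///   struct S { int x; };
///   int S;  // valid C: tag lookup finds the struct, ordinary lookup the int
/// \endcode
/// LookupTagName and LookupOrdinaryName model that distinction.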
enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. 
LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading.
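/// A hypothetical usage sketch (the surrounding names are invented):
/// \code
///   if (NamedDecl *ND = SemaRef.LookupSingleName(CurScope, DName, Loc,
///                                                Sema::LookupOrdinaryName))
///     use(ND);  // exactly one visible, unambiguous declaration was found
/// \endcode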
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. 
/// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. 
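/// As an illustrative sketch (not from the original source):
/// \code
///   void f(int *p) __attribute__((nonnull));  // pointer: valid subject
///   void g(int &r);  // reference: a valid subject only when RefOkay is true
/// \endcode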
bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation.
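/// As an illustrative sketch (not from the original source):
/// \code
///   @interface I
///   - (void)m;
///   @end
///   @implementation I
///   @end  // warning: method definition for 'm' not found
/// \endcode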
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property setter or
  /// getter and its property has a backing ivar, returns this ivar; otherwise,
  /// returns NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                               const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                   SourceLocation AtLoc,
                                                   SourceLocation LParenLoc,
                                                   FieldDeclarator &FD,
                                                   Selector GetterSel,
                                                   SourceLocation GetterNameLoc,
                                                   Selector SetterSel,
                                                   SourceLocation SetterNameLoc,
                                                   const bool isReadWrite,
                                                   unsigned &Attributes,
                                                   const unsigned AttributesAsWritten,
                                                   QualType T,
                                                   TypeSourceInfo *TSI,
                                                   tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       SourceLocation GetterNameLoc,
                                       Selector SetterSel,
                                       SourceLocation SetterNameLoc,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true, or false, accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface
  /// or protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See the description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in the global method pool for
  /// the given selector. It checks the desired kind first; if none is found
  /// and the parameter checkTheOther is set, it then checks the other kind.
  /// If no such method or only one method is found, the function returns
  /// false; otherwise, it returns true.
  bool
  CollectMultipleMethodsInGlobalPool(Selector Sel,
                                     SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                     bool InstanceFirst, bool CheckTheOther,
                                     const ObjCObjectType *TypeBound = nullptr);

  bool
  AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                 SourceRange R, bool receiverIdOrClass,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void
  DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                     Selector Sel, SourceRange R,
                                     bool receiverIdOrClass);

private:
  /// - Returns a selector which best matches the given argument list, or
  /// nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                         SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
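  ///
  /// A typical use inside a parsing action, where the scope must be popped
  /// on every early-error return (a sketch; SemaRef is assumed):
  /// \code
  ///   FunctionScopeRAII FuncScope(SemaRef);
  ///   // ... early returns here pop the function scope automatically ...
  ///   FuncScope.disable(); // success: leave the scope for the normal path
  /// \endcode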
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
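  ///
  /// For example, code like the following triggers the warning (under
  /// -Wzero-as-null-pointer-constant in C++11 and later):
  /// \code
  ///   void *p = 0;  // suggest using 'nullptr' instead of '0'
  /// \endcode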
  void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                  const ObjCInterfaceDecl *UnknownObjCClass,
                                  bool ObjCPropertyAccess,
                                  bool AvoidPartialAvailabilityChecks = false,
                                  ObjCInterfaceDecl *ClassReceiver = nullptr);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);

  /// Issue any -Wunguarded-availability warnings in \c FD
  void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.

  bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
  bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                         const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                         bool ObjCPropertyAccess = false,
                         bool AvoidPartialAvailabilityChecks = false,
                         ObjCInterfaceDecl *ClassReceiver = nullptr);
  void NoteDeletedFunction(FunctionDecl *FD);
  void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
  bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                        ObjCMethodDecl *Getter,
                                        SourceLocation Loc);
  void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                             ArrayRef<Expr *> Args);

  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  void PopExpressionEvaluationContext();

  void DiscardCleanupsInEvaluationContext();

  ExprResult TransformToPotentiallyEvaluated(Expr *E);
  ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

  ExprResult ActOnConstantExpression(ExprResult Res);

  // Functions for marking a declaration referenced. These functions also
  // contain the relevant logic for marking if a reference to a function or
  // variable is an odr-use (in the C++11 sense). There are separate variants
  // for expressions referring to a decl; these exist because odr-use marking
  // needs to be delayed for some constant variables when we build one of the
  // named expressions.
  //
  // MightBeOdrUse indicates whether the use could possibly be an odr-use, and
  // should usually be true. This only needs to be set to false if the lack of
  // odr-use cannot be determined from the current context (for instance,
  // because the name denotes a virtual function and was written without an
  // explicit nested-name-specifier).
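  //
  // For instance (illustrative only):
  //   struct S { static const int N = 5; };
  //   int f() { return S::N; }         // not an odr-use: the value is folded
  //   const int *g() { return &S::N; } // odr-use: S::N needs a definition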
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool MightBeOdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
  void MarkMemberReferenced(MemberExpr *E);
  void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
  void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                         unsigned CapturingScopeIndex);

  ExprResult CheckLValueToRValueConversionOperand(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks whether
  /// the capture can occur, without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                          SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                          QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());

  /// Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

  /// Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  /// Mark all of the declarations referenced within a particular AST node as
  /// referenced. Used when template instantiation instantiates a non-dependent
  /// type -- entities referenced by the type are now referenced.
  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);

  /// Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
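  ///
  /// For example, adjacent literals concatenate into a single literal:
  /// \code
  ///   const char *s = "foo" "bar";  // equivalent to "foobar"
  /// \endcode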
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
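  //
  // For instance (illustrative only), given a class with a custom
  // operator->:
  //   struct Handle { Data *operator->(); };
  //   h.field;   // no member 'field' in 'Handle'; retried as 'h->field'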
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);
  ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                   SourceLocation LParenLoc,
                                   ArrayRef<Expr *> Arg,
                                   SourceLocation RParenLoc,
                                   Expr *Config = nullptr,
                                   bool IsExecConfig = false,
                                   ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                                 TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc, Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult BuildInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation EqualOrColonLoc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  // Handle the final expression in a statement expression.
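  // For instance, in the GNU extension (illustrative only)
  //   int x = ({ int y = f(); y + 1; });
  // the value of the statement expression is that of its last expression.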
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
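  ///
  /// For example (element-wise vector conversion):
  /// \code
  ///   typedef float float4 __attribute__((ext_vector_type(4)));
  ///   typedef int   int4   __attribute__((ext_vector_type(4)));
  ///   float4 fv = {1.5f, 2.5f, 3.5f, 4.5f};
  ///   int4   iv = __builtin_convertvector(fv, int4);  // {1, 2, 3, 4}
  /// \endcode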
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison category
  // types stored in ASTContext. The bit-index corresponds to the integer value
  // of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  /// Lookup the specified comparison category types in the standard
  /// library and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs.
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
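  ///
  /// For example:
  /// \code
  ///   struct X {
  ///     X(std::initializer_list<int> il);  // initializer-list constructor
  ///     X(int a, int b);                   // not one
  ///   };
  /// \endcode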
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope,
                               SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc,
                               IdentifierInfo *Alias,
                               CXXScopeSpec &SS,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
                               bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name, SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType, NamedDecl *FoundDecl,
                                   CXXConstructorDecl *Constructor,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType,
                                   CXXConstructorDecl *Constructor,
                                   bool Elidable,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
  // the constructor can be elidable?
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType, NamedDecl *FoundDecl,
                                   CXXConstructorDecl *Constructor,
                                   bool Elidable,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                    FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. 
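  ///
  /// For example, this is ill-formed (the trailing return type sits where
  /// the cv-qualifier-seq would be):
  /// \code
  ///   struct A {
  ///     static auto f() -> decltype(this);  // error: 'this' in a static
  ///                                         // member function
  ///   };
  /// \endcode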
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ).
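/// For example (illustrative):
/// \code
/// template <typename... Ts>
/// auto sum(Ts... ts) { return (ts + ... + 0); } // binary right fold
/// \endcode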
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// /// \returns true if an error occurred, false otherwise. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
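/// For example (an illustrative sketch of how the scopes differ):
/// \code
/// struct Obj { static void *operator new(std::size_t); };
/// Obj *a = new Obj;   // class scope: finds Obj::operator new
/// Obj *b = ::new Obj; // global scope: finds ::operator new
/// \endcode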
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with an ExprWithCleanups node. /// Otherwise, just returns the passed-in expression.
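/// For example (illustrative), a call producing a temporary with a
/// non-trivial destructor gets wrapped:
/// \code
/// std::string make();
/// void g() { make(); } // the full-expression becomes an ExprWithCleanups
/// \endcode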
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, no error message is emitted. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer.
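/// A sketch of the save/restore round-trip (variable names are
/// illustrative):
/// \code
/// void *Ann = SaveNestedNameSpecifierAnnotation(SS);
/// // ... the parser stashes 'Ann' in an annotation token ...
/// RestoreNestedNameSpecifierAnnotation(Ann, AnnRange, SS);
/// \endcode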
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
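/// For example (illustrative), 'n = 0' below is an init-capture:
/// \code
/// auto counter = [n = 0]() mutable { return ++n; };
/// \endcode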
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
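/// For example (illustrative; assumes Objective-C++ with blocks enabled),
/// this conversion is what makes the following initialization work:
/// \code
/// void (^blk)() = []{};
/// \endcode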
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator. /// \param ND The declaration that introduces the name /// std::container::iterator. /// \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed
a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
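/// For example (illustrative), the override below is diagnosed:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: 'f' overrides a 'final' function
/// \endcode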
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
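/// For example (illustrative), 'Wrap' names a deduction guide here:
/// \code
/// template <typename T> struct Wrap { Wrap(T); };
/// Wrap(bool) -> Wrap<int>;
/// \endcode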
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
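/// For example (illustrative):
/// \code
/// template <typename T> void f() {
///   class T::template apply<int> *p; // elaborated-type-specifier
/// }
/// \endcode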
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
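/// For example (illustrative):
/// \code
/// template <typename MetaFun, typename T1, typename T2>
/// void h() {
///   typename MetaFun::template apply<T1, T2> x; // handled here
/// }
/// \endcode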
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
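///
/// For example (an illustrative sketch, not code from this header), the
/// nested-name-specifier 'Ts::' below still contains the unexpanded
/// parameter pack 'Ts', which this routine would collect:
///
/// \code
/// template<typename ...Ts>
/// struct X {
///   typedef typename Ts::type type; // ill-formed: 'Ts' is unexpanded
/// };
/// \endcode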
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
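///
/// For instance (an illustrative sketch; 'f' and 'g' are hypothetical), the
/// call below fails because 'T' is deduced as both 'int' and 'double',
/// which corresponds to an inconsistent deduction result:
///
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 2.0); } // deduces T = int and T = double
/// \endcode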
enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
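///
/// As a sketch of the C++17 construct involved (the names here are
/// hypothetical), the declaration of 'p' below relies on deduction guides
/// implicitly declared from the constructors of 'pair':
///
/// \code
/// template<typename T> struct pair {
///   pair(T first, T second);
/// };
/// pair p(1, 2); // deduces pair<int>
/// \endcode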
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts.
/// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace, or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information.
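///
/// A minimal usage sketch (assuming a Sema reference 'SemaRef' and a known
/// expansion count 'NumExpansions'):
///
/// \code
/// for (unsigned I = 0; I != NumExpansions; ++I) {
///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///   // ... substitute the I'th element of each expanded parameter pack ...
/// }
/// \endcode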
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionIndexRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates to true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// number of recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed.
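///
/// For example (an illustrative sketch; 'has_foo' is a hypothetical trait),
/// a substitution failure in the return type below is suppressed rather
/// than diagnosed because it occurs in a SFINAE context:
///
/// \code
/// template<typename T>
/// auto has_foo(T *t) -> decltype(t->foo(), std::true_type());
/// std::false_type has_foo(...);
/// \endcode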
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
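///
/// A minimal usage sketch (assuming a parameter index 'I', a value 'Info',
/// a parameter count 'NumParams', and an 'ExtProtoInfo' named 'EPI'):
///
/// \code
/// Sema::ExtParameterInfoBuilder ParamInfos;
/// ParamInfos.set(I, Info);
/// EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
/// \endcode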
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
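///
/// The expansions handled here are of the form sketched below ('f' and 'g'
/// are hypothetical); substituting into 'args...' may expand the pack into
/// zero or more separate call arguments:
///
/// \code
/// template<typename ...Ts>
/// void g(Ts ...args) {
///   f(args...);
/// }
/// \endcode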
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo
*ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed.
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
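/// A minimal caller-side sketch (names such as \c S, \c Method and
/// \c CurrentClass are placeholders, not part of this interface):
/// \code
///   Sema::ResultTypeCompatibilityKind RTC =
///       S.checkRelatedResultTypeCompatibility(Method, CurrentClass);
///   if (RTC == Sema::RTC_Incompatible) {
///     // The declared result type does not fit the method's class.
///   }
/// \endcode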
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - Called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
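/// An illustrative call from a pragma handler (sketch; it assumes the
/// handler passes the parsed visibility identifier as \c VisType, or a
/// null pointer for the 'pop' form):
/// \code
///   Actions.ActOnPragmaVisibility(VisType, PragmaLoc);
/// \endcode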
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
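/// For example, a client can test the pragma state as follows (sketch,
/// assuming a Sema &S):
/// \code
///   if (S.getOptimizeOffPragmaLocation().isValid()) {
///     // An "#pragma clang optimize off" region is currently active.
///   }
/// \endcode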
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Tries to capture a lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \p D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs.
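/// A minimal usage sketch (assuming a Sema &S and a ValueDecl *VD under
/// analysis):
/// \code
///   if (VarDecl *PrivateCopy = S.isOpenMPCapturedDecl(VD)) {
///     // VD is tracked by an enclosing OpenMP construct.
///   }
/// \endcode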
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in a 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the 'requires' directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct combiner.
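/// Taken together, the declare reduction callbacks are invoked in roughly
/// this order while parsing the directive (sketch; error handling and the
/// parsing of the combiner and initializer expressions are omitted):
/// \code
///   DeclGroupPtrTy DRD = Actions.ActOnOpenMPDeclareReductionDirectiveStart(
///       S, DC, Name, ReductionTypes, AS);
///   Actions.ActOnOpenMPDeclareReductionCombinerStart(S, D);
///   Actions.ActOnOpenMPDeclareReductionCombinerEnd(D, CombinerExpr);
///   VarDecl *OmpPrivParm =
///       Actions.ActOnOpenMPDeclareReductionInitializerStart(S, D);
///   Actions.ActOnOpenMPDeclareReductionInitializerEnd(D, InitExpr,
///                                                     OmpPrivParm);
///   return Actions.ActOnOpenMPDeclareReductionDirectiveEnd(S, DRD, IsValid);
/// \endcode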
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of a target region, i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of a target region, i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement.
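/// A typical region-parsing sequence around such a callback looks roughly
/// like this (sketch; \c Actions is the parser's Sema reference and
/// \c ParsedBlock is a placeholder for the parsed structured block):
/// \code
///   Actions.ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
///   StmtResult AStmt = ParsedBlock;
///   AStmt = Actions.ActOnOpenMPRegionEnd(AStmt, Clauses);
///   StmtResult Dir = Actions.ActOnOpenMPParallelDirective(
///       Clauses, AStmt.get(), StartLoc, EndLoc);
/// \endcode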
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. 
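/// Standalone directives such as this one carry no associated statement, so
/// the callback only needs the directive's source range (sketch):
/// \code
///   StmtResult Res = Actions.ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
/// \endcode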
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. 
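/// For example, 'collapse(2)' might reach Sema as (sketch; \c NumForLoops is
/// the parsed constant expression, which must evaluate to a strictly
/// positive integer):
/// \code
///   OMPClause *C = Actions.ActOnOpenMPCollapseClause(NumForLoops, StartLoc,
///                                                    LParenLoc, EndLoc);
/// \endcode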
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. 
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause.
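/// For example, 'linear(val(x) : 2)' might reach Sema as (sketch; \c Vars
/// and \c StepExpr are the parsed variable list and step expression):
/// \code
///   OMPClause *C = Actions.ActOnOpenMPLinearClause(
///       Vars, StepExpr, StartLoc, LParenLoc, OMPC_LINEAR_val, LinLoc,
///       ColonLoc, EndLoc);
/// \endcode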
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. 
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function.
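// For instance (sketch, assuming a Sema &S and an argument expression E):
//   VarArgKind VAK = S.isValidVarArgType(E->getType());
//   // VAK_Valid passes unconditionally, VAK_ValidInCXX11 only in C++11
//   // mode; the remaining kinds warrant a diagnostic (see
//   // checkVariadicArgument below).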
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. 
AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
// Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. 
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. 
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transtively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. 
  /// - If CurContext is a __host__ __device__ function and we are compiling for
  /// the device, creates a diagnostic which is emitted if and when we realize
  /// that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
  DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  /// function is emitted for the device, emits the diagnostics immediately.
  /// - If CurContext is a non-`declare target` function and we are compiling
  /// for the device, creates a diagnostic which is emitted if and when we
  /// realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  /// function is emitted for the host, emits the diagnostics immediately.
  /// - If CurContext is a non-host function, just ignore it.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);

  DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong. Returns CFT_Host if D is null.
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentCUDATarget() {
    return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
  }

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. 
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, 
SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
  void CheckShadowInheritedFields(const SourceLocation &Loc,
                                  DeclarationName FieldName,
                                  const CXXRecordDecl *RD,
                                  bool DeclIsField = true);

  /// Check if the given expression contains a 'break' or 'continue'
  /// statement that produces control flow different from GCC.
  void CheckBreakContinueBinding(Expr *E);

  /// Check whether the receiver is a mutable ObjC container which
  /// attempts to add itself into the container.
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// Register a magic integral constant to be used as a type tag.
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const ArrayRef<const Expr *> ExprArgs,
                                SourceLocation CallSiteLoc);

  /// Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

  /// The handler for the FileChanged preprocessor events.
  ///
  /// Used for diagnostics that implement custom semantic analysis for #include
  /// directives, like -Wpragma-pack.
  sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;
  bool isCFError(RecordDecl *D);

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. 
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
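// Editor's note: a minimal sketch (not part of Clang) of what the
// DenseMapInfo specialization above enables -- FunctionDeclAndLoc can be
// used directly as a DenseMap key, hashed from the canonical FunctionDecl
// pointer plus the raw SourceLocation encoding. Names below are
// illustrative only:
//
//   llvm::DenseMap<clang::Sema::FunctionDeclAndLoc, unsigned> DiagCount;
//
//   void noteDiagnosedAt(const clang::Sema::FunctionDeclAndLoc &FDL) {
//     ++DiagCount[FDL]; // uses getHashValue() / isEqual() defined above
//   }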
GB_subassign_11.c
//------------------------------------------------------------------------------ // GB_subassign_11: C(I,J)<M,repl> += scalar ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 11: C(I,J)<M,repl> += scalar ; using S // M: present // Mask_comp: false // C_replace: true // accum: present // A: scalar // S: constructed // C, M: not bitmap #include "GB_unused.h" #include "GB_subassign_methods.h" GrB_Info GB_subassign_11 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_Matrix M, const bool Mask_struct, const GrB_BinaryOp accum, const void *scalar, const GrB_Type atype, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ; ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_MATRIX_WAIT_IF_JUMBLED (M) ; GB_GET_C ; // C must not be bitmap GB_GET_MASK ; GB_GET_ACCUM_SCALAR ; GB_GET_S ; //-------------------------------------------------------------------------- // Method 11: C(I,J)<M,repl> += scalar ; using S //-------------------------------------------------------------------------- // Time: Optimal. All entries in M+S must be examined. All entries in S // are modified: if M(i,j)=1 then S(i,j) is used to write to the // corresponding entry in C. If M(i,j) is not present, or zero, then the // entry in C is cleared (because of C_replace). If S(i,j) is not present, // and M(i,j)=1, then the scalar is inserted into C. The only case that // can be skipped is if neither S nor M is present. As a result, this // method need not traverse all of IxJ. It can limit its traversal to the // pattern of M+S. // Method 09 and Method 11 are very similar. 
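    //--------------------------------------------------------------------------
    // editor's sketch: a user-level call that can reach this method
    //--------------------------------------------------------------------------

    // The call below is illustrative only (not part of this file); whether
    // Method 11 is actually selected also depends on the sparsity formats of
    // C and M.  With M present, GrB_REPLACE set, and an accum operator:
    //
    //      GrB_Descriptor desc ;
    //      GrB_Descriptor_new (&desc) ;
    //      GrB_Descriptor_set (desc, GrB_OUTP, GrB_REPLACE) ; // C_replace: true
    //      // C(I,J)<M,replace> += 3.14, with accum = GrB_PLUS_FP64:
    //      GxB_Matrix_subassign_FP64 (C, M, GrB_PLUS_FP64, 3.14,
    //          I, ni, J, nj, desc) ;
    //      GrB_Descriptor_free (&desc) ;

    // A note on the action tables used below (editor's reading): in a tag
    // such as [C A 1], the first symbol is the state of the C(iC,jC) entry
    // (C = entry present, X = zombie, "." = not present), A is the scalar
    // being accumulated, and the final digit is the effective mask value mij.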
//-------------------------------------------------------------------------- // Parallel: M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- if (M_is_bitmap) { // all of IxJ must be examined GB_SUBASSIGN_IXJ_SLICE ; } else { // traverse all M+S GB_SUBASSIGN_TWO_SLICE (M, S) ; } //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- if (M_is_bitmap) { //---------------------------------------------------------------------- // phase1: M is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iM_start, iM_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iM_start:iM_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iM_start) ; int64_t pM_start = j * Mvlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j) //-------------------------------------------------------------- for (int64_t iM = iM_start ; iM < iM_end ; iM++) { int64_t pM = pM_start + iM ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ; bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ; if (Sfound && !mij) { // S (i,j) is present but M (i,j) is false // ----[C A 0] or [X A 0]------------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): becomes zombie GB_C_S_LOOKUP ; GB_DELETE_ENTRY ; GB_NEXT (S) ; } else if (!Sfound && mij) { // S (i,j) is not present, M (i,j) is true // ----[. A 1]------------------------------------------ // [. 
A 1]: action: ( insert ) task_pending++ ; } else if (Sfound && mij) { // S (i,j) present and M (i,j) is true GB_C_S_LOOKUP ; // ----[C A 1] or [X A 1]------------------------------- // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_withaccum_C_A_1_scalar ; GB_NEXT (S) ; } } } GB_PHASE1_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase1: M is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE1 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get S(:,j) and M(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and M(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and M (:,j) have entries while (pS < pS_end && pM < pM_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iM = GBI (Mi, pM, Mvlen) ; if (iS < iM) { // S (i,j) is present but M (i,j) is not // ----[C A 0] or [X A 0]------------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): becomes zombie GB_C_S_LOOKUP ; GB_DELETE_ENTRY ; GB_NEXT (S) ; } else if (iM < iS) { // S (i,j) is not present, M (i,j) is present if (GB_mcast (Mx, pM, msize)) { // ----[. A 1]-------------------------------------- // [. A 1]: action: ( insert ) task_pending++ ; } GB_NEXT (M) ; } else { // both S (i,j) and M (i,j) present GB_C_S_LOOKUP ; if (GB_mcast (Mx, pM, msize)) { // ----[C A 1] or [X A 1]--------------------------- // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_withaccum_C_A_1_scalar ; } else { // ----[C A 0] or [X A 0]--------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): now zombie GB_DELETE_ENTRY ; } GB_NEXT (S) ; GB_NEXT (M) ; } } // while list S (:,j) has entries. List M (:,j) exhausted. while (pS < pS_end) { // S (i,j) is present but M (i,j) is not // ----[C A 0] or [X A 0]----------------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): becomes zombie GB_C_S_LOOKUP ; GB_DELETE_ENTRY ; GB_NEXT (S) ; } // while list M (:,j) has entries. List S (:,j) exhausted. while (pM < pM_end) { // S (i,j) is not present, M (i,j) is present if (GB_mcast (Mx, pM, msize)) { // ----[. A 1]------------------------------------------ // [. 
A 1]: action: ( insert ) task_pending++ ; } GB_NEXT (M) ; } } GB_PHASE1_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; if (M_is_bitmap) { //---------------------------------------------------------------------- // phase2: M is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iM_start, iM_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iM_start:iM_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iM_start) ; int64_t pM_start = j * Mvlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; for (int64_t iM = iM_start ; iM < iM_end ; iM++) { int64_t pM = pM_start + iM ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ; bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ; if (!Sfound && mij) { // S (i,j) is not present, M (i,j) is true // ----[. A 1]------------------------------------------ // [. 
A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ; GB_PENDING_INSERT (scalar) ; } else if (Sfound) { // S (i,j) present GB_NEXT (S) ; } } } GB_PHASE2_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase2: M is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE2 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get S(:,j) and M(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and M(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and M (:,j) have entries while (pS < pS_end && pM < pM_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iM = GBI (Mi, pM, Mvlen) ; if (iS < iM) { // S (i,j) is present but M (i,j) is not GB_NEXT (S) ; } else if (iM < iS) { // S (i,j) is not present, M (i,j) is present if (GB_mcast (Mx, pM, msize)) { // ----[. A 1]-------------------------------------- // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ; GB_PENDING_INSERT (scalar) ; } GB_NEXT (M) ; } else { // both S (i,j) and M (i,j) present GB_NEXT (S) ; GB_NEXT (M) ; } } // while list M (:,j) has entries. List S (:,j) exhausted. while (pM < pM_end) { // S (i,j) is not present, M (i,j) is present if (GB_mcast (Mx, pM, msize)) { // ----[. A 1]------------------------------------------ // [. A 1]: action: ( insert ) int64_t iM = GBI (Mi, pM, Mvlen) ; int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ; GB_PENDING_INSERT (scalar) ; } GB_NEXT (M) ; } } GB_PHASE2_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
GB_unop__atan_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__atan_fc32_fc32) // op(A') function: GB (_unop_tran__atan_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = catanf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = catanf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = catanf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__atan_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = catanf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = catanf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__atan_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
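// Editor's note (not part of the generated file): this kernel is reached
// through the generic apply path, e.g.
//
//      // C = atan (A), with A and C of type GxB_FC32:
//      GrB_Matrix_apply (C, NULL, NULL, GxB_ATAN_FC32, A, NULL) ;
//
// Whether this hard-coded kernel or the generic fallback runs depends on
// the GB_DISABLE / GBCOMPACT settings above.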
fastexp.h
#ifndef FASTEXP_H
#define FASTEXP_H

#include <cmath>
#include <cstdint>
#include <cstddef>
#include <vector>

#include "FastExp/product.h"
#include "FastExp/ieee.h"

namespace fastexp {

    enum class Approximation {IEEE, PRODUCT};

    /** \brief Fast approximate exponential.
     *
     * This function implements a fast, vectorizable approximation
     * of the exponential function based on the following two articles:
     *
     * - Malossi, A. Cristiano I. & Ineichen, Yves & Bekas, Costas & Curioni,
     *   Alessandro. "Fast Exponential Computation on SIMD Architectures." (2015)
     *   10.13140/2.1.4362.3207.
     * - Schraudolph, Nicol N. "A fast, compact approximation of the exponential
     *   function." Neural Computation 11.4 (1999): 853-862.
     *
     * The approximation interpolates linearly between points on the curve of
     * the exponential function that can be expressed as 2^i where i is
     * a signed integer. So yes, that is very approximate ...
     *
     * \tparam Real The floating point type of the arguments.
     * \param x The argument of the exponential function.
     * \return The approximated value of the exponential function.
     */
    #ifndef _WIN32
    #pragma omp declare simd notinbranch
    #endif
    template<typename Real,
             template<typename, size_t> class Approximation = IEEE,
             size_t degree = 2>
    inline Real exp(const Real &x) {
        return Approximation<Real, degree>::evaluate(x);
    }

    /** \brief Fast approximate array exponential.
     *
     * Applies the fast exponential to an array of given length, making
     * use of SIMD instructions if available. To enable vectorization
     * the code needs to be compiled with OpenMP support.
     *
     * \tparam Real The floating point type of the arguments.
     * \param x The array to which to apply the exponential function.
     * \param n The number of elements in the array.
     */
    template<typename Real,
             template<typename, size_t> class Approximation = IEEE,
             size_t degree = 2>
    inline void exp(Real *x, size_t n) {
        // Vectorized part.
        #pragma omp simd
        for (size_t i = 0; i < n; ++i) {
            Real e = fastexp::exp<Real, Approximation, degree>(x[i]);
            x[i] = e;
        }
    }

    template<typename Real,
             template<typename, size_t> class Approximation = IEEE,
             size_t degree = 2>
    inline void exp(std::vector<Real> &x) {
        // Take the vector by reference: the original signature took it by
        // value, so the exponentials were computed on a copy and discarded.
        // Vectorized part.
        size_t n = x.size();
        Real *x_ptr = &x[0];
        #pragma omp simd
        for (size_t i = 0; i < n; ++i) {
            Real e = fastexp::exp<Real, Approximation, degree>(x_ptr[i]);
            x_ptr[i] = e;
        }
    }
}      // fastexp
#endif // FASTEXP_H
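// Editor's usage sketch (not part of the library; assumes compilation with
// OpenMP so the simd pragmas take effect):
//
//   #include "fastexp.h"
//   #include <vector>
//
//   std::vector<float> v = {0.0f, 0.5f, 1.0f, 2.0f};
//   fastexp::exp(v);               // in-place over the whole vector
//   fastexp::exp(v.data(), 2);     // or over a raw array prefix
//   float e = fastexp::exp(1.0f);  // single value, default IEEE approximation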
GB_unop__identity_fp32_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp32_int8 // op(A') function: GB_unop_tran__identity_fp32_int8 // C type: float // A type: int8_t // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = (float) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (float) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp32_int8 ( float *Cx, // Cx and Ax may be aliased const int8_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int8_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
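// Editor's note (not part of the generated file): identity kernels like
// this one implement typecasting applies, e.g.
//
//      // C = (float) A, where A is GrB_INT8 and C is GrB_FP32:
//      GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_FP32, A, NULL) ;
//
// The int8_t -> float conversion happens in the GB_CAST macro above; the
// identity operator itself does no work.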
reduce3.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ /* * reduce3.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef REDUCE3_H_ #define REDUCE3_H_ #define EXTRA_PARAMS_LENGTH 10 #include <templatemath.h> #include <helper_cuda.h> #include <helpers/sharedmem.h> #ifdef _OPENMP #include <omp.h> #endif #include <pairwise_util.h> #include <dll.h> #include <helpers/shape.h> #include <ops/ops.h> #include <op_boilerplate.h> #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #endif #ifndef _OPENMP #define omp_get_thread_num() 0 #define omp_get_max_threads() 1 #endif #include "legacy_ops.h" namespace functions { namespace reduce3 { /** * Reduce involving * 2 arrays */ template<typename T> class Reduce3 { public: #ifdef __CUDACC__ virtual __device__ inline T opAtomic(T d1, T d2, T *extraParamsRef) = 0; #endif #ifdef __CUDACC__ /** * Aggregate shared memory * @param sPartialsRef * @param tid * @param extraParams */ template<typename OpType> static __inline__ __device__ void aggregatePartials(T **sPartialsRef, Nd4jLong tid, Nd4jLong numItems, T *extraParamsRef) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. T *sPartials = *sPartialsRef; Nd4jLong floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParamsRef); } __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads) { sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParamsRef); } __syncthreads(); } } /** Perform a reduction @param n the number of elements @param xOffset the starting offset @param dx the data to perform the reduction on @param incx the increment on which to perform the reduction @param extraParams extra parameters used for calculations @param result where to store the result of the reduction */ virtual __inline__ __device__ void transformNoElementWiseStride( T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { Nd4jLong n = shape::length(xShapeInfo); int rank = shape::rank(xShapeInfo); T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer(); T startingVal = this->startingValue(dx); // FIXME: this ugly fast fix. 
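// (Editor's note) extraZ below holds per-block "extra" accumulators for
// reduce3 ops that track more than one running value -- e.g. the two
// squared norms of a cosine-similarity reduction (see the CosineSimilarity
// comment further down); simpler ops just ignore it.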
__shared__ T extraZ[2];

            if (threadIdx.x == 0) {
                extraZ[0] = (T) 0.0;
                extraZ[1] = (T) 0.0;
            }

            sPartials[threadIdx.x] = startingVal;
            __syncthreads();

            Nd4jLong idx[MAX_RANK];
            // grid-stride loop (the original started at blockIdx.x * gridDim.x,
            // which skips/overlaps elements; blockDim.x is the correct base)
            for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) {
                shape::ind2subC(rank, shape::shapeOf(xShapeInfo), i, idx);
                auto offset  = shape::getOffset(0, shape::shapeOf(xShapeInfo), shape::stride(xShapeInfo), idx, rank);
                auto yOffset = shape::getOffset(0, shape::shapeOf(yShapeInfo), shape::stride(yShapeInfo), idx, rank);
                sPartials[threadIdx.x] = update(sPartials[threadIdx.x], this->opAtomic(dx[offset], dy[yOffset], extraZ), extraZ);
            }

            T **sPartialsRef = (T **) &sPartials;
            aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), extraZ);

            /**
             * Look at something that uses the extra params
             * and aggregates the extra values properly.
             * This will be used in summary stats too.
             */

            // write result for this block to global mem
            if (threadIdx.x == 0) {
                if (postProcessOrNot) {
                    result[blockIdx.x] = postProcess(sPartials[0], n, extraZ);
                }
                else {
                    result[blockIdx.x] = sPartials[0];
                }
            }
        }

        /**
         *
         */
        template<typename OpType>
        static inline __device__ void execScalarCuda(
                T *dx,
                Nd4jLong *xShapeInfo,
                T *dy,
                Nd4jLong *yShapeInfo,
                T *extraParams,
                T *result,
                Nd4jLong *resultShapeInfo,
                int *allocationPointer,
                T *reductionBuffer,
                UnifiedSharedMemory *manager,
                Nd4jLong *tadOnlyShapeInfo) {

            // SharedMemory <T> val;
            T *sPartials = (T *) manager->getSharedReductionBuffer(); // val.getPointer();

            // FIXME: this ugly fast fix.
            __shared__ T extraZ[3];

            if (threadIdx.x == 0) {
                extraZ[0] = (T) 0.0f;
                extraZ[1] = (T) 0.0f;

                if (extraParams != NULL) {
                    extraZ[2] = extraParams[0];
                } else
                    extraZ[2] = (T) 0.0f;
            }

            __syncthreads();

            T startingVal = OpType::startingValue(dx);
            Nd4jLong length = shape::length(xShapeInfo);
            int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
            int yElementWiseStride = shape::elementWiseStride(yShapeInfo);
            int tid = blockIdx.x * blockDim.x + threadIdx.x;
            char xOrder = shape::order(xShapeInfo);
            char yOrder = shape::order(yShapeInfo);

            if (xOrder == yOrder && (xElementWiseStride > 0 && yElementWiseStride > 0) &&
                shape::strideDescendingCAscendingF(xShapeInfo) && shape::strideDescendingCAscendingF(yShapeInfo)) {

                if (xElementWiseStride == 1 && yElementWiseStride == 1) {
                    for (Nd4jLong i = tid; i < length; i += gridDim.x * blockDim.x) {
                        startingVal = OpType::update(startingVal, OpType::opAtomic(dx[i], dy[i], extraZ), extraZ);
                    }
                } else {
                    for (Nd4jLong i = tid; i < length; i += gridDim.x * blockDim.x) {
                        startingVal = OpType::update(startingVal, OpType::opAtomic(dx[i * xElementWiseStride], dy[i * yElementWiseStride], extraZ), extraZ);
                    }
                }

                sPartials[threadIdx.x] = startingVal;
            } else {
                __shared__ Nd4jLong *xShape;
                __shared__ Nd4jLong *yShape;
                __shared__ Nd4jLong *xStride;
                __shared__ Nd4jLong *yStride;
                __shared__ int rank;

                if (threadIdx.x == 0) {
                    xShape = shape::shapeOf(xShapeInfo);
                    yShape = shape::shapeOf(yShapeInfo);
                    xStride = shape::stride(xShapeInfo);
                    yStride = shape::stride(yShapeInfo);
                    rank = shape::rank(xShapeInfo);
                }
                __syncthreads();

                T startingVal = OpType::startingValue(dx);
                T *sPartials = (T *) manager->getSharedReductionBuffer();

                Nd4jLong xCoords[MAX_RANK];
                Nd4jLong yCoords[MAX_RANK];

                sPartials[threadIdx.x] = startingVal;

                for (Nd4jLong i = tid; i < length; i += gridDim.x * blockDim.x) {
                    shape::ind2subC(rank, xShape, i, xCoords);
                    shape::ind2subC(rank, yShape, i, yCoords);
                    auto offset  = shape::getOffset(0, xShape, xStride, xCoords, rank);
                    auto yOffset = shape::getOffset(0, yShape, yStride,
yCoords,rank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[offset], dy[yOffset], extraZ), extraZ); } } __syncthreads(); T **sPartialsRef = (T **) &sPartials; aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, length), extraZ); __syncthreads(); if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionBuffer; __shared__ bool amLast; int rank = shape::rank(xShapeInfo); tid = threadIdx.x; T *extraBuffer = (T *) allocationPointer; if (threadIdx.x == 0) { reductionBuffer[blockIdx.x] = sPartials[0]; extraBuffer[blockIdx.x] = extraZ[0]; extraBuffer[gridDim.x + blockIdx.x] = extraZ[1]; } __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } sPartials[tid] = startingVal; __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(dx); // TODO: later probably replace this. Right now we need extraZ sync for CosineSimilarity ONLY if (tid == 0 && extraZ[0] != (T) 0.0 && extraZ[1] != (T) 0.0) { extraZ[0] = 0.0; extraZ[1] = 0.0; for (int i = 0; i < gridDim.x; i++) { extraZ[0] += extraBuffer[i]; extraZ[1] += extraBuffer[gridDim.x + i]; } } for (Nd4jLong i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraZ); } __syncthreads(); aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraZ); __syncthreads(); if (threadIdx.x == 0) { result[0] = OpType::postProcess(sPartials[0], length, extraZ); } } } else { if (tid == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; result[0] = OpType::postProcess(sPartials[0], length, extraZ); } } } template<typename OpType> __device__ static inline void transformAll( T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, Nd4jLong *yOffsets) { // initialize partials first T *sPartials = (T *) manager->getSharedReductionBuffer(); T startingVal = OpType::startingValue(dx); sPartials[threadIdx.x] = startingVal; T *tempX = sPartials + blockDim.x; const int maxBlock = blockDim.x; __shared__ T extraZ[OpType::extraParamsLen > 0 ? 
OpType::extraParamsLen : 1]; __shared__ int xTadLength; __shared__ int yTadLength; __shared__ int xTads; __shared__ int yTads; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *xStride; __shared__ int xRank; __shared__ Nd4jLong *yShape; __shared__ Nd4jLong *yStride; __shared__ int yRank; //reading initial data if (threadIdx.x == 0) { xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); yTadLength = shape::tadLength(yShapeInfo, dimension, dimensionLength); xTads = shape::length(xShapeInfo) / xTadLength; yTads = shape::length(yShapeInfo) / yTadLength; xShape = shape::shapeOf(xTadShapeInfo); xStride = shape::stride(xTadShapeInfo); xRank = shape::rank(xTadShapeInfo); yShape = shape::shapeOf(yTadShapeInfo); yStride = shape::stride(yTadShapeInfo); yRank = shape::rank(yTadShapeInfo); } __syncthreads(); Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; int limit = xTadLength / maxBlock; if (xTadLength % maxBlock > 0) limit++; for (int r = blockIdx.x; r < xTads; r += blockDim.x * gridDim.x) { T *x = dx + xOffsets[r]; if (threadIdx.x < xTadLength && threadIdx.x < maxBlock) { if (shape::order(xTadShapeInfo) == 'c') { shape::ind2subC(xRank, xShape, threadIdx.x, xCoord); } else { shape::ind2sub(xRank, xShape, threadIdx.x, xCoord); } auto xO = shape::getOffset(0, xShape, xStride, xCoord, xRank); tempX[threadIdx.x] = x[xO]; } for (int g = 0; g < yTads; g++) { T *y = dy + yOffsets[g]; int ri = (r * yTads) + g; sPartials[threadIdx.x] = startingVal; if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) { extraZ[threadIdx.x] = (T) startingVal; } __syncthreads(); // we might have data too large for single cache block, rendering cache useless though :( for (int t = 0; t < limit; t++) { // we reset tempX IF we have >1 tiles if (t >= 1 || (limit > 1 && g > 0)) if (threadIdx.x + (t * maxBlock) < xTadLength) { if (shape::order(xTadShapeInfo) == 'c') { shape::ind2subC(xRank, xShape, threadIdx.x + (t * maxBlock), xCoord); } else { shape::ind2sub(xRank, xShape, threadIdx.x + (t * maxBlock), xCoord); } Nd4jLong xO = shape::getOffset(0, xShape, xStride, xCoord, xRank); tempX[threadIdx.x] = x[xO]; // tempX[threadIdx.x] = x[threadIdx.x + (t * maxBlock)]; } for (int f = threadIdx.x + (t * maxBlock); f < xTadLength && f < threadIdx.x + ((t + 1) * maxBlock); f += blockDim.x * gridDim.x) { if (shape::order(yTadShapeInfo) == 'c') { shape::ind2subC(yRank, yShape, f, yCoord); } else { shape::ind2sub(yRank, yShape, f, yCoord); } Nd4jLong yO = shape::getOffset(0, yShape, yStride, yCoord, yRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::opAtomic(tempX[threadIdx.x], y[yO], extraZ), extraZ); } // we MUST step through this block altogether __syncthreads(); } T **sPartialsRef = (T **) &sPartials; aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, xTadLength), extraZ); __syncthreads(); if (threadIdx.x == 0) { result[ri] = OpType::postProcess(sPartials[threadIdx.x],xTadLength, extraZ); } __syncthreads(); } } } /** Perform a reduction @param n the number of elements @param xOffset the starting offset @param dx the data to perform the reduction on @param incx the increment on which to perform the reduction @param extraParams extra parameters used for calculations @param result where to store the result of the reduction */ template<typename OpType> __device__ static inline void transform( T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int 
dimensionLength, int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { /** * Gpu information for the problem */ int tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int resultScalar; __shared__ int xElementWiseStride; __shared__ int yElementWiseStride; //shared memory space for storing intermediate results //SharedMemory <T> val; T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer(); T init = OpType::startingValue(dx); sPartials[threadIdx.x] = init; __shared__ T extraZ[OpType::extraParamsLen > 0 ? OpType::extraParamsLen : 1]; //length for the tad __shared__ Nd4jLong resultLength; __shared__ int tadLength; __shared__ int yLength; __shared__ int tadElementWiseStride; __shared__ int yTadElementWiseStride; T startingVal = OpType::startingValue(dx); T reduction = OpType::startingValue(dx); if (threadIdx.x == 0) { if (resultShapeInfo != nullptr) resultLength = shape::length(resultShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (dimension == nullptr || dimension[0] == MAX_DIMENSION) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; auto xStride = shape::stride(xShapeInfo); char xOrder = shape::order(xShapeInfo); tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadElementWiseStride = shape::elementWiseStride(tadOnlyShapeInfo); yLength = shape::length(yShapeInfo); if (yTadOnlyShapeInfo != nullptr) yTadElementWiseStride = shape::elementWiseStride(yTadOnlyShapeInfo); } __syncthreads(); // code branch for TAD vs full array if (tadLength == yLength) { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; auto yShape = shape::shapeOf(yShapeInfo); auto yStride = shape::stride(yShapeInfo); auto xShape = shape::shapeOf(tadOnlyShapeInfo); auto xStride = shape::stride(tadOnlyShapeInfo); int yRank = shape::rank(yShapeInfo); int xRank = shape::rank(tadOnlyShapeInfo); for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) { int xOffsetForTad = tadOffsets[i]; if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) { extraZ[threadIdx.x] = (T) startingVal; } __syncthreads(); for(int j = threadIdx.x; j < tadLength; j += blockDim.x) { shape::ind2subC(xRank,xShape, j, xCoord); shape::ind2subC(yRank,yShape, j, yCoord); Nd4jLong xOffset = shape::getOffset(xOffsetForTad, xShape, xStride, xCoord, xRank); Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank); sPartials[threadIdx.x] = j < blockDim.x ? 
OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ) : OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ), extraZ); } __syncthreads(); T **sPartialsRef = (T **) &sPartials; aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ); __syncthreads(); if (threadIdx.x == 0) result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ); __syncthreads(); } } else if (!resultScalar) { if(tadElementWiseStride >= 1 && yTadElementWiseStride) { for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) { int xOffsetForTad = tadOffsets[i]; int yOffsetForTad = yTadOffsets[i]; if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) { extraZ[threadIdx.x] = (T) startingVal; } __syncthreads(); if (threadIdx.x < tadLength) sPartials[threadIdx.x] = OpType::op(dx[xOffsetForTad + tadElementWiseStride * threadIdx.x],dy[yOffsetForTad + yTadElementWiseStride * threadIdx.x], extraZ); for(int j = threadIdx.x + blockDim.x; j < tadLength; j += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffsetForTad + tadElementWiseStride * j],dy[yOffsetForTad + yTadElementWiseStride * j], extraZ), extraZ); } __syncthreads(); T **sPartialsRef = (T **) &sPartials; aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ); __syncthreads(); if (threadIdx.x == 0) result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ); __syncthreads(); } } else { /* // DO NOT REMOVE THIS COMMENTED BLOCK PLEASE for (int r = blockIdx.x; r < tad->numTads; r += gridDim.x) { if (threadIdx.x == 0) tad->createOffsetForBlock(r); __syncthreads(); int tadOffsetForBlock = tad->tadOffsetForBlock; T *xVal = dx + tadOffsetForBlock; sPartials[threadIdx.x] = this->startingValue(xVal); for(int i = threadIdx.x; i < tad->tadLength; i+= blockDim.x) { int xOffsetForTad = shape::tadOffset(i, xShapeInfo, dimension, dimensionLength, nullptr); int yOffsetForTad = shape::tadOffset(i, yShapeInfo, dimension, dimensionLength, nullptr); sPartials[threadIdx.x] = this->update(sPartials[threadIdx.x],dx[tadOffsetForBlock + i * tad->tadElementWiseStride], extraParams); } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength T **sPartialsRef = (T **) &sPartials; aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tad->tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) result[r] = this->postProcess(sPartials[threadIdx.x], tad->tadLength, extraParams); } */ Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; auto yShape = shape::shapeOf(yTadOnlyShapeInfo); auto yStride = shape::stride(yTadOnlyShapeInfo); auto xShape = shape::shapeOf(tadOnlyShapeInfo); auto xStride = shape::stride(tadOnlyShapeInfo); int yRank = shape::rank(yTadOnlyShapeInfo); int xRank = shape::rank(tadOnlyShapeInfo); for(int i = blockIdx.x; i < resultLength; i+= gridDim.x) { auto xOffsetForTad = tadOffsets[i]; auto yOffsetForTad = yTadOffsets[i]; if (OpType::extraParamsLen > 0 && threadIdx.x < OpType::extraParamsLen) { extraZ[threadIdx.x] = (T) startingVal; } __syncthreads(); for(int j = threadIdx.x; j < tadLength; j += blockDim.x) { shape::ind2subC(xRank,xShape, j, xCoord); shape::ind2subC(yRank,yShape, j, yCoord); auto xOffset = shape::getOffset(xOffsetForTad, xShape, xStride, xCoord, xRank); auto yOffset = shape::getOffset(yOffsetForTad, yShape, yStride, yCoord, yRank); sPartials[threadIdx.x] = j < blockDim.x ? 
OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ) : OpType::update(sPartials[threadIdx.x], OpType::opAtomic(dx[xOffset],dy[yOffset], extraZ), extraZ); } __syncthreads(); T **sPartialsRef = (T **) &sPartials; aggregatePartials<OpType>(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraZ); __syncthreads(); if (threadIdx.x == 0) result[i] = OpType::postProcess(sPartials[threadIdx.x],tadLength, extraZ); __syncthreads(); } } } } #endif #ifdef __CUDACC__ __device__ static inline void exec( const int opNum, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { DISPATCH_BY_OPNUM(transform, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), REDUCE3_OPS); } __device__ static inline void execAllCuda( const int opNum, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { DISPATCH_BY_OPNUM(transformAll, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets), REDUCE3_OPS); } __device__ static inline void execScalarCuda( const int opNum, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int * allocationPointer, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_BY_OPNUM(execScalarCuda, PARAMS(dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, allocationPointer, reductionBuffer, manager, tadOnlyShapeInfo), REDUCE3_OPS); } #endif #ifdef __CUDACC__ __host__ #endif static T execScalar( const int opNum, T *x, Nd4jLong *xShapeInfo, T *extraParamsVals, T *y, Nd4jLong *yShapeInfo) { RETURNING_DISPATCH_BY_OPNUM(execScalar, PARAMS(x, xShapeInfo, extraParamsVals, y, yShapeInfo), REDUCE3_OPS); } static void exec( const int opNum, T *x, Nd4jLong *xShapeInfo, T *extraParamsVals, T *y, Nd4jLong *yShapeInfo, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { DISPATCH_BY_OPNUM(exec, PARAMS(x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfoBuffer, dimension, dimensionLength), REDUCE3_OPS); } static void exec( const int opNum, T *x, Nd4jLong *xShapeInfo, T *extraParamsVals, T *y, Nd4jLong *yShapeInfo, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM(exec, PARAMS(x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfoBuffer, dimension, dimensionLength, tadShapeInfo, tadOffsets), REDUCE3_OPS); } static void execAll( const int opNum, T *x, Nd4jLong *xShapeInfo, T *extraParamsVals, T *y, Nd4jLong *yShapeInfo, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, Nd4jLong *yTadShapeInfo, 
Nd4jLong *yOffsets) {
            DISPATCH_BY_OPNUM(execAll, PARAMS(x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfoBuffer, dimension, dimensionLength, xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets), REDUCE3_OPS);
        }

        template<typename OpType>
#ifdef __CUDACC__
        __host__
#endif
        static T execScalar(
                T *x,
                Nd4jLong *xShapeInfo,
                T *extraParams,
                T *y,
                Nd4jLong *yShapeInfo) {

            T startingVal = OpType::startingValue(x);
            Nd4jLong length = shape::length(xShapeInfo);
            Nd4jLong xElementWiseStride = shape::elementWiseStride(xShapeInfo);
            Nd4jLong yElementWiseStride = shape::elementWiseStride(yShapeInfo);

            T extraParamsVals[3] = {(T) 0.0, (T) 0.0, (T) 0.0};
            // it's a possible case for the EqualsWithEps op
            if (extraParams != nullptr) {
                extraParamsVals[2] = extraParams[0];
            }

            char xOrder = shape::order(xShapeInfo);
            char yOrder = shape::order(yShapeInfo);

            if (xOrder == yOrder && (xElementWiseStride >= 1 && yElementWiseStride >= 1) &&
                shape::strideDescendingCAscendingF(xShapeInfo) && shape::strideDescendingCAscendingF(yShapeInfo)) {

                if (xElementWiseStride == 1 && yElementWiseStride == 1) {
                    // TODO: proper reduction required here
                    // (index is Nd4jLong so long arrays do not overflow an int)
                    for (Nd4jLong i = 0; i < length; i++) {
                        startingVal = OpType::update(startingVal, OpType::op(x[i], y[i], extraParamsVals), extraParamsVals);
                    }

                    return OpType::postProcess(startingVal, length, extraParamsVals);
                } else {
                    // TODO: proper reduction required here
                    for (Nd4jLong i = 0; i < length; i++) {
                        startingVal = OpType::update(startingVal, OpType::op(x[i * xElementWiseStride], y[i * yElementWiseStride], extraParamsVals), extraParamsVals);
                    }

                    return OpType::postProcess(startingVal, length, extraParamsVals);
                }
            } else {
                Nd4jLong xCoords[MAX_RANK];
                Nd4jLong yCoords[MAX_RANK];

                int xRank = shape::rank(xShapeInfo);
                int yRank = shape::rank(yShapeInfo);

                Nd4jLong *xShape = shape::shapeOf(xShapeInfo);
                Nd4jLong *xStride = shape::stride(xShapeInfo);
                Nd4jLong *yShape = shape::shapeOf(yShapeInfo);
                Nd4jLong *yStride = shape::stride(yShapeInfo);

                for (Nd4jLong i = 0; i < length; i++) {
                    shape::ind2subC(xRank, xShape, i, xCoords);
                    shape::ind2subC(yRank, yShape, i, yCoords);

                    Nd4jLong offset = shape::getOffset(0, xShape, xStride, xCoords, xRank);
                    Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, yCoords, yRank);
                    startingVal = OpType::update(startingVal, OpType::op(x[offset], y[yOffset], extraParamsVals), extraParamsVals);
                }
            }

            return OpType::postProcess(startingVal, length, extraParamsVals);
        }

        template<typename OpType>
        static void execAll(
                T *x,
                Nd4jLong *xShapeInfo,
                T *extraParams,
                T *y,
                Nd4jLong *yShapeInfo,
                T *result,
                Nd4jLong *resultShapeInfoBuffer,
                int *dimension,
                int dimensionLength,
                Nd4jLong *xTadShapeInfo,
                Nd4jLong *xOffsets,
                Nd4jLong *yTadShapeInfo,
                Nd4jLong *yOffsets) {

            auto xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
            auto yTadLength = shape::tadLength(yShapeInfo, dimension, dimensionLength);

            auto xTads = shape::length(xShapeInfo) / xTadLength;
            auto yTads = shape::length(yShapeInfo) / yTadLength;

            auto xShape = shape::shapeOf(xTadShapeInfo);
            auto xStride = shape::stride(xTadShapeInfo);
            int xRank = shape::rank(xTadShapeInfo);

            auto yShape = shape::shapeOf(yTadShapeInfo);
            auto yStride = shape::stride(yTadShapeInfo);
            int yRank = shape::rank(yTadShapeInfo);

            Nd4jLong xCoord[MAX_RANK];
            Nd4jLong yCoord[MAX_RANK];

            T startingVal = OpType::startingValue(x);

#pragma omp parallel for proc_bind(AFFINITY) default(shared) private(xCoord, yCoord)
            for (Nd4jLong r = 0; r < xTads; r++) {
                Nd4jLong xOffset = xOffsets[r];
                T *lX = x + xOffset;

                for (Nd4jLong g = 0; g < yTads; g++) {
                    auto yOffset
= yOffsets[g]; T *lY = y + yOffset; auto ri = (r * yTads) + g; T *localExtraParams = nullptr; if (OpType::extraParamsLen > 0) localExtraParams = new T[OpType::extraParamsLen]; for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) { localExtraParams[extraParamsIdx] = startingVal; } for (int f = 0; f < xTadLength; f++) { if (shape::order(yTadShapeInfo) == 'c') { shape::ind2subC(yRank, yShape, f, yCoord); } else { shape::ind2sub(yRank, yShape, f, yCoord); } if (shape::order(xTadShapeInfo) == 'c') { shape::ind2subC(xRank, xShape, f, xCoord); } else { shape::ind2sub(xRank, xShape, f, xCoord); } Nd4jLong xO = shape::getOffset(0, xShape, xStride, xCoord, xRank); Nd4jLong yO = shape::getOffset(0, yShape, yStride, yCoord, yRank); result[ri] = OpType::update(result[ri], OpType::op(lX[xO], lY[yO], localExtraParams), localExtraParams); } result[ri] = OpType::postProcess(result[ri], xTadLength, localExtraParams); if (localExtraParams != nullptr) delete[] localExtraParams; } } } template<typename OpType> static void exec( T *x, Nd4jLong *xShapeInfo, T *extraParams, T *y, Nd4jLong *yShapeInfo, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { /* nd4j_printf("Xp: [%p]; Yp: [%p]; Zp: [%p];\n", (void *) x, (void *) y, (void *) result); nd4j_printf("XSp: [%p]; YSp: [%p]; ZSp: [%p];\n", (void *) xShapeInfo, (void *) yShapeInfo, (void *) resultShapeInfoBuffer); nd4j_printf("Ep: [%p]; Dp: [%p]\n", (void *) extraParams, (void *) dimension); nd4j_printf("TSp: [%p]; TOp: [%p]\n", (void *) tadShapeInfo, (void *) tadOffsets); nd4j_printf("X[0]: %f\n", x[0]); nd4j_printf("Y[0]: %f\n", y[0]); nd4j_printf("Z[0]: %f\n", result[0]); nd4j_printf("XS[0]: %i\n", xShapeInfo[0]); nd4j_printf("YS[0]: %i\n", yShapeInfo[0]); nd4j_printf("ZS[0]: %i\n", resultShapeInfoBuffer[0]); nd4j_printf("E[0]: %f\n", extraParams[0]); nd4j_printf("D[0]: %i\n", dimension[0]); nd4j_printf("TS[0]: %i\n", tadShapeInfo[0]); nd4j_printf("TO[0]: %lld\n", tadOffsets[0]); nd4j_printf("dimLength: %i\n", dimensionLength); */ T startingVal = OpType::startingValue(x); auto tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); auto tads = shape::length(xShapeInfo) / tadLength; auto *xShape = shape::shapeOf(tadShapeInfo); auto *xStride = shape::stride(tadShapeInfo); int xRank = shape::rank(tadShapeInfo); auto *yShape = shape::shapeOf(yShapeInfo); auto *yStride = shape::stride(yShapeInfo); int yRank = shape::rank(yShapeInfo); //shape::printShapeInfoLinear(xShapeInfo); //shape::printShapeInfoLinear(yShapeInfo); //shape::printShapeInfoLinear(resultShapeInfoBuffer); //shape::printShapeInfoLinear(tadShapeInfo); Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; //#pragma omp parallel for proc_bind(AFFINITY) default(shared) for (Nd4jLong r = 0; r < tads; r++) { Nd4jLong offset = tadOffsets[r]; T *localExtraParams = nullptr; if (OpType::extraParamsLen > 0) localExtraParams = new T[OpType::extraParamsLen]; for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) { localExtraParams[extraParamsIdx] = startingVal; } for (Nd4jLong f = 0; f < tadLength; f++) { if (shape::order(tadShapeInfo) == 'c') { shape::ind2subC(xRank, xShape, f, xCoord); shape::ind2subC(yRank, yShape, f, yCoord); } else { shape::ind2sub(xRank, xShape, f, xCoord); shape::ind2sub(yRank, yShape, f, yCoord); } Nd4jLong xOffset = shape::getOffset(offset, xShape, xStride, xCoord, xRank); Nd4jLong yOffset = shape::getOffset(0, yShape, yStride, 
yCoord, yRank);
                    result[r] = OpType::update(result[r], OpType::op(x[xOffset], y[yOffset], localExtraParams), localExtraParams);
                }

                result[r] = OpType::postProcess(result[r], tadLength, localExtraParams);

                if (localExtraParams != nullptr)
                    delete[] localExtraParams;
            }
        }

        template<typename OpType>
        static void exec(
                T *x,
                Nd4jLong *xShapeInfo,
                T *extraParams,
                T *y,
                Nd4jLong *yShapeInfo,
                T *result,
                Nd4jLong *resultShapeInfoBuffer,
                int *dimension,
                int dimensionLength) {

            /*
            nd4j_printf("Xp: [%p]; Yp: [%p]; Zp: [%p];\n", (void *) x, (void *) y, (void *) result);
            nd4j_printf("XSp: [%p]; YSp: [%p]; ZSp: [%p];\n", (void *) xShapeInfo, (void *) yShapeInfo, (void *) resultShapeInfoBuffer);
            nd4j_printf("Ep: [%p]; Dp: [%p]\n", (void *) extraParams, (void *) dimension);
            nd4j_printf("X[0]: %f\n", x[0]);
            nd4j_printf("Y[0]: %f\n", y[0]);
            nd4j_printf("Z[0]: %f\n", result[0]);
            nd4j_printf("XS[0]: %i\n", xShapeInfo[0]);
            nd4j_printf("YS[0]: %i\n", yShapeInfo[0]);
            nd4j_printf("ZS[0]: %i\n", resultShapeInfoBuffer[0]);
            nd4j_printf("E[0]: %f\n", extraParams[0]);
            nd4j_printf("D[0]: %i\n", dimension[0]);
            nd4j_printf("dimLength: %i\n", dimensionLength);
            */

            T extraParamsVals[3] = {(T) 0.0, (T) 0.0, (T) 0.0};

            if (shape::isScalar(resultShapeInfoBuffer)) {
                result[0] = execScalar<OpType>(x, xShapeInfo, extraParamsVals, y, yShapeInfo);
                return;
            }

            char xOrder = shape::order(xShapeInfo);
            char yOrder = shape::order(yShapeInfo);

            if (xOrder != yOrder) {
                Nd4jLong shapeIter[MAX_RANK];
                Nd4jLong coord[MAX_RANK];
                int dim;
                Nd4jLong xStridesIter[MAX_RANK];
                Nd4jLong yStridesIter[MAX_RANK];

                auto xShape = shape::shapeOf(xShapeInfo);
                auto xStride = shape::stride(xShapeInfo);
                auto yStride = shape::stride(yShapeInfo);
                int rank = shape::rank(xShapeInfo);

                if (PrepareTwoRawArrayIter<T>(rank,
                                              xShape,
                                              x,
                                              xStride,
                                              y,
                                              yStride,
                                              &rank,
                                              shapeIter,
                                              &x,
                                              xStridesIter,
                                              &y,
                                              yStridesIter) >= 0) {

                    auto resultLength = shape::length(resultShapeInfoBuffer);
                    auto tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);

                    ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
                        Nd4jLong xOffset = shape::getOffset(0, xShape, xStride, coord, rank);
                        auto reductionIndex = xOffset / resultLength;
                        result[reductionIndex] = OpType::update(result[reductionIndex], OpType::op(x[0], y[0], extraParamsVals), extraParamsVals);
                    }
                    ND4J_RAW_ITER_TWO_NEXT(dim, rank, coord, shapeIter, x, xStridesIter, y, yStridesIter);

                    //#pragma omp parallel for proc_bind(AFFINITY) default(shared)
                    for (Nd4jLong i = 0; i < resultLength; i++) {
                        result[i] = OpType::postProcess(result[i], tadLength, extraParamsVals);
                    }
                } else {
                    printf("Unable to prepare array\n");
                }
            } else {
                T startingVal = OpType::startingValue(x);

                Nd4jLong resultLength = shape::length(resultShapeInfoBuffer);

                shape::TAD xTad(xShapeInfo, dimension, dimensionLength);
                xTad.createTadOnlyShapeInfo();
                xTad.createOffsets();

                shape::TAD yTad(yShapeInfo, dimension, dimensionLength);
                yTad.createTadOnlyShapeInfo();
                yTad.createOffsets();

                /**
                 * The element wise stride belongs to a reduction index.
                 * When used out of order, we can get rid of the data
                 * dependencies and rely on using the max dimension
                 * specified for stride instead.
                 * Say we take the sum(0,1) along arr:
                 * we can use arr.stride(1) as a representation
                 * along which to iterate.
*/ int largerElementWiseStride; int smallerElementWiseStride; auto xElementWiseStride = shape::elementWiseStride(xTad.tadOnlyShapeInfo); auto yElementWiseStride = shape::elementWiseStride(yTad.tadOnlyShapeInfo); int tadLength; Nd4jLong xModLength; Nd4jLong yModLength; Nd4jLong *iterationTadInfo; bool xTadBigger; if(shape::length(xShapeInfo) > shape::length(yShapeInfo)) { tadLength = shape::length(xTad.tadOnlyShapeInfo); iterationTadInfo = xTad.tadOnlyShapeInfo; largerElementWiseStride = shape::elementWiseStride(xShapeInfo); smallerElementWiseStride = shape::elementWiseStride(yShapeInfo); xModLength = 1; yModLength = tadLength; xTadBigger = true; } else { tadLength = shape::length(yTad.tadOnlyShapeInfo); iterationTadInfo = yTad.tadOnlyShapeInfo; largerElementWiseStride = shape::elementWiseStride(yShapeInfo); smallerElementWiseStride = shape::elementWiseStride(xShapeInfo); xModLength = tadLength; yModLength = 1; xTadBigger = false; } if (largerElementWiseStride >= 1 && smallerElementWiseStride >= 1 && xElementWiseStride >= 1 && yElementWiseStride >= 1) { if(shape::length(xShapeInfo) == shape::length(yShapeInfo)) { //#pragma omp parallel for proc_bind(AFFINITY) default(shared) for (Nd4jLong i = 0; i < resultLength; i++) { T *localExtraParams = nullptr; if (OpType::extraParamsLen > 0) localExtraParams = new T[OpType::extraParamsLen]; for (int extraParamsIdx = 0; extraParamsIdx < OpType::extraParamsLen; extraParamsIdx++) { localExtraParams[extraParamsIdx] = startingVal; } Nd4jLong offset = xTad.tadOffsets[i]; Nd4jLong yOffset = yTad.tadOffsets[i]; result[i] = OpType::op(x[offset], y[yOffset], localExtraParams); for (int j = 1; j < tadLength; j++) { int xIdx = (offset + xElementWiseStride * j); int yIdx = (yOffset + yElementWiseStride * j); result[i] = OpType::update(result[i], OpType::op(x[xIdx], y[yIdx], localExtraParams), localExtraParams); } result[i] = OpType::postProcess(result[i], tadLength, localExtraParams); if (localExtraParams != nullptr) delete[] localExtraParams; } } else { int tadsPerThread = resultLength / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); //#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) for (int i = 0; i < resultLength; i++) { Nd4jLong xOffset = xTadBigger ? xTad.tadOffsets[i] : 0; Nd4jLong yOffset = !xTadBigger ? yTad.tadOffsets[i] : 0; auto xShape = xTadBigger ? xTad.tadShape : shape::shapeOf(xShapeInfo); auto yShape = !xTadBigger ? yTad.tadShape : shape::shapeOf(yShapeInfo); auto xStride = xTadBigger ? xTad.tadStride : shape::stride(xShapeInfo); auto yStride = !xTadBigger ? yTad.tadStride : shape::stride(yShapeInfo); int xRank = xTadBigger ? shape::rank(xTad.tadOnlyShapeInfo) : shape::rank(xShapeInfo); int yRank = !xTadBigger ? 
shape::rank(yTad.tadOnlyShapeInfo) : shape::rank(yShapeInfo); Nd4jLong coord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; T start = OpType::startingValue(x); for (int j = 0; j < tadLength; j++) { if(xTadBigger) { shape::ind2subC(shape::rank(xTad.tadOnlyShapeInfo), xTad.tadShape, j, coord); // ind2subC expects the TAD shape here, not its stride shape::ind2subC(shape::rank(yShapeInfo), shape::shapeOf(yShapeInfo), j, yCoord); } else { shape::ind2subC(shape::rank(xShapeInfo), shape::shapeOf(xShapeInfo), j, coord); shape::ind2subC(shape::rank(yTad.tadOnlyShapeInfo), yTad.tadShape, j, yCoord); } int xOffset2 = shape::getOffset(xOffset,xShape,xStride,coord,xRank); int yOffset2 = shape::getOffset(yOffset,yShape,yStride,yCoord,yRank); start = OpType::update(start, OpType::op(x[xOffset2], y[yOffset2], extraParamsVals), extraParamsVals); } result[i] = OpType::postProcess(start, shape::length(iterationTadInfo), extraParamsVals); } } } else { shape::TAD xTad(xShapeInfo, dimension, dimensionLength); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); shape::TAD yTad(yShapeInfo, dimension, dimensionLength); yTad.createTadOnlyShapeInfo(); yTad.createOffsets(); int tadsPerThread = resultLength / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); Nd4jLong coord[MAX_RANK]; //#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) private(coord) for (int i = 0; i < resultLength; i++) { Nd4jLong xOffset = xTad.tadOffsets[i]; Nd4jLong yOffset = yTad.tadOffsets[i]; T start = OpType::startingValue(x + xOffset); for (int j = 0; j < tadLength; j++) { shape::ind2subC(shape::rank(iterationTadInfo), shape::shapeOf(iterationTadInfo), j, coord); Nd4jLong xOffset2 = shape::getOffset(xOffset,shape::shapeOf(xTad.tadOnlyShapeInfo),shape::stride(xTad.tadOnlyShapeInfo),coord,shape::rank(xTad.tadOnlyShapeInfo)); Nd4jLong yOffset2 = shape::getOffset(yOffset,shape::shapeOf(yTad.tadOnlyShapeInfo),shape::stride(yTad.tadOnlyShapeInfo),coord,shape::rank(yTad.tadOnlyShapeInfo)); start = OpType::update(start, OpType::op(x[xOffset2], y[yOffset2], extraParamsVals), extraParamsVals); } result[i] = OpType::postProcess(start, shape::length(iterationTadInfo), extraParamsVals); } } } } }; } } #ifdef __CUDACC__ /** * The driver api * @param opNum the number * @param n the length of the reduce * @param dx the input data * @param xShapeInfo the shape information * @param dy the pairwise reduce * @param yShapeInfo the shape information for y * @param extraParams the extra parameters in the operation * @param result where to store the result * @param resultShapeInfo the shape information * @param gpuInformation the gpu information * @param dimension the dimension to reduce along * @param dimensionLength the dimension length * @param postProcessOrNot whether to post-process */ template <typename T> __device__ void reduce3Generic( const int opNum, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); }
__syncthreads(); functions::reduce3::Reduce3<T>::exec( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } template <typename T> __device__ void reduce3AllGeneric( const int opNum, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); } __syncthreads(); functions::reduce3::Reduce3<T>::execAllCuda( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } template <typename T> __device__ void reduce3ScalarGeneric( int opNum, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *allocationPointer, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); } __syncthreads(); functions::reduce3::Reduce3<T>::execScalarCuda( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, allocationPointer, reductionBuffer, manager, tadOnlyShapeInfo); } /** * The driver api * @param opNum the number * @param n the length of the reduce * @param dx the input data * @param xShapeInfo the shape information * @param dy the pairwise reduce * @param yShapeInfo the shape information for y * @param extraParams the extra parameters in the operation * @param result where to store the result * @param resultShapeInfo the shape information * @param dimension the dimension to reduce along * @param dimensionLength the dimension length * @param postProcessOrNot whether to post-process */ extern "C" __global__ void reduce3Double( int opNum, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { reduce3Generic<double>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } extern "C" __global__ void reduce3AllDouble( int opNum, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong
*yTadOffsets) { reduce3AllGeneric<double>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } /** * The driver api * @param opNum the number * @param n the length of the reduce * @param dx the input data * @param xShapeInfo the shape information * @param dy the pairwise reduce * @param yShapeInfo the shape information for y * @param extraParams the extra parameters in the operation * @param result where to store the result * @param resultShapeInfo the shape information * @param gpuInformation the gpu information * @param dimension the dimension to reduce along * @param dimensionLength the dimension length * @param postProcessOrNot whether to post-process */ extern "C" __global__ void reduce3Float( int opNum, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { reduce3Generic<float>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } extern "C" __global__ void reduce3AllFloat( int opNum, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { reduce3AllGeneric<float>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } extern "C" __global__ void reduce3Half( int opNum, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { reduce3Generic<float16>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } extern "C" __global__ void reduce3AllHalf( int opNum, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { reduce3AllGeneric<float16>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } extern "C" __global__ void reduce3ScalarFloat( int opNum, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *extraParams, float *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, float *reductionBuffer, Nd4jLong
*tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { reduce3ScalarGeneric<float>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, allocationPointer, reductionBuffer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } extern "C" __global__ void reduce3ScalarHalf( int opNum, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *extraParams, float16 *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { reduce3ScalarGeneric<float16>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, allocationPointer, reductionBuffer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } extern "C" __global__ void reduce3ScalarDouble( int opNum, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *extraParams, double *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *yTadOnlyShapeInfo, Nd4jLong *yTadOffsets) { reduce3ScalarGeneric<double>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, allocationPointer, reductionBuffer, tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets); } #endif #endif /* REDUCE3_H_ */
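A minimal standalone sketch of the reduce3 TAD pattern that exec() above dispatches through OpType::op/update/postProcess, for the common case where the TADs are contiguous rows. All names below (tad_reduce3, EuclideanDistance) are illustrative stand-ins, not the nd4j API:

#include <cmath>
#include <cstddef>
#include <vector>

// Pairwise-reduction op in the style of reduce3's OpType: op() combines one pair of
// elements, update() folds it into the accumulator, postProcess() finalizes per TAD.
struct EuclideanDistance {
    static double startingValue() { return 0.0; }
    static double op(double x, double y) { const double d = x - y; return d * d; }
    static double update(double acc, double v) { return acc + v; }
    static double postProcess(double acc, std::size_t /*tadLength*/) { return std::sqrt(acc); }
};

// Reduce two row-major [numTads x tadLength] buffers pairwise, one result per row.
// With contiguous rows, tadOffsets[i] degenerates to i * tadLength.
template <typename OpType>
std::vector<double> tad_reduce3(const double* x, const double* y,
                                std::size_t numTads, std::size_t tadLength) {
    std::vector<double> result(numTads);
    for (std::size_t i = 0; i < numTads; ++i) {
        double acc = OpType::startingValue();
        for (std::size_t j = 0; j < tadLength; ++j)
            acc = OpType::update(acc, OpType::op(x[i * tadLength + j],
                                                 y[i * tadLength + j]));
        result[i] = OpType::postProcess(acc, tadLength);
    }
    return result;
}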
nh_p_grad.h
#ifndef NH_P_GRAD_H #define NH_P_GRAD_H void nh_p_grad(Storage3D& uout, Storage3D& vout, const Storage3D& uin, const Storage3D& vin, const Storage3D& rdx, const Storage3D& rdy, const Storage3D& gz, const Storage3D& pp, const Storage3D& pk3, const Storage3D& wk1, Storage3D& wk, Storage3D& du, Storage3D& dv, const ElementType dt) { for (int64_t k = 0; k < domain_height; ++k) { for (int64_t i = 0; i < domain_size + 1; ++i) { for (int64_t j = 0; j < domain_size + 1; ++j) { wk(i, j, k) = (pk3(i, j, k + 1) - pk3(i, j, k)); } } } for (int64_t k = 0; k < domain_height; ++k) { for (int64_t i = 0; i < domain_size; ++i) { for (int64_t j = 0; j < domain_size; ++j) { du(i, j, k) = ((dt / (wk(i, j, k) + wk(i + 1, j, k))) * (((gz(i, j, k + 1) - gz(i + 1, j, k)) * (pk3(i + 1, j, k + 1) - pk3(i, j, k))) + ((gz(i, j, k) - gz(i + 1, j, k + 1)) * (pk3(i, j, k + 1) - pk3(i + 1, j, k))))); uout(i, j, k) = (((uin(i, j, k) + du(i, j, k)) + ((dt / (wk1(i, j, k) + wk1(i + 1, j, k))) * (((gz(i, j, k + 1) - gz(i + 1, j, k)) * (pp(i + 1, j, k + 1) - pp(i, j, k))) + ((gz(i, j, k) - gz(i + 1, j, k + 1)) * (pp(i, j, k + 1) - pp(i + 1, j, k)))))) * rdx(i, j, k)); } } } for (int64_t k = 0; k < domain_height; ++k) { for (int64_t i = 0; i < domain_size; ++i) { for (int64_t j = 0; j < domain_size; ++j) { dv(i, j, k) = ((dt / (wk(i, j, k) + wk(i, j + 1, k))) * (((gz(i, j, k + 1) - gz(i, j + 1, k)) * (pk3(i, j + 1, k + 1) - pk3(i, j, k))) + ((gz(i, j, k) - gz(i, j + 1, k + 1)) * (pk3(i, j, k + 1) - pk3(i, j + 1, k))))); vout(i, j, k) = (((vin(i, j, k) + dv(i, j, k)) + ((dt / (wk1(i, j, k) + wk1(i, j + 1, k))) * (((gz(i, j, k + 1) - gz(i, j + 1, k)) * (pp(i, j + 1, k + 1) - pp(i, j, k))) + ((gz(i, j, k) - gz(i, j + 1, k + 1)) * (pp(i, j, k + 1) - pp(i, j + 1, k)))))) * rdy(i, j, k)); } } } } void nh_p_grad_fullfusion(Storage3D& uout, Storage3D& vout, const Storage3D& uin, const Storage3D& vin, const Storage3D& rdx, const Storage3D& rdy, const Storage3D& gz, const Storage3D& pp, const Storage3D& pk3, const Storage3D& wk1, Storage3D& wk, Storage3D& du, Storage3D& dv, const ElementType dt) { for (int64_t k = 0; k < domain_height; ++k) { for (int64_t i = 0; i < domain_size; ++i) { for (int64_t j = 0; j < domain_size; ++j) { auto wk_ijk = (pk3(i, j, k + 1) - pk3(i, j, k)); auto wk_i1jk = (pk3(i + 1, j, k + 1) - pk3(i + 1, j, k)); auto wk_ij1k = (pk3(i, j + 1, k + 1) - pk3(i, j + 1, k)); auto _du = ((dt / (wk_ijk + wk_i1jk)) * (((gz(i, j, k + 1) - gz(i + 1, j, k)) * (pk3(i + 1, j, k + 1) - pk3(i, j, k))) + ((gz(i, j, k) - gz(i + 1, j, k + 1)) * (pk3(i, j, k + 1) - pk3(i + 1, j, k))))); uout(i, j, k) = (((uin(i, j, k) + _du) + ((dt / (wk1(i, j, k) + wk1(i + 1, j, k))) * (((gz(i, j, k + 1) - gz(i + 1, j, k)) * (pp(i + 1, j, k + 1) - pp(i, j, k))) + ((gz(i, j, k) - gz(i + 1, j, k + 1)) * (pp(i, j, k + 1) - pp(i + 1, j, k)))))) * rdx(i, j, k)); auto _dv = ((dt / (wk_ijk + wk_ij1k)) * (((gz(i, j, k + 1) - gz(i, j + 1, k)) * (pk3(i, j + 1, k + 1) - pk3(i, j, k))) + ((gz(i, j, k) - gz(i, j + 1, k + 1)) * (pk3(i, j, k + 1) - pk3(i, j + 1, k))))); vout(i, j, k) = (((vin(i, j, k) + _dv) + ((dt / (wk1(i, j, k) + wk1(i, j + 1, k))) * (((gz(i, j, k + 1) - gz(i, j + 1, k)) * (pp(i, j + 1, k + 1) - pp(i, j, k))) + ((gz(i, j, k) - gz(i, j + 1, k + 1)) * (pp(i, j, k + 1) - pp(i, j + 1, k)))))) * rdy(i, j, k)); } } } } void nh_p_grad_partialfusion(Storage3D& uout, Storage3D& vout, const Storage3D& uin, const Storage3D& vin, const Storage3D& rdx, const Storage3D& rdy, const Storage3D& gz, const Storage3D& pp, const Storage3D& pk3, const Storage3D& wk1, 
Storage3D& wk, Storage3D& du, Storage3D& dv, const ElementType dt) { for (int64_t k = 0; k < domain_height; ++k) { for (int64_t i = 0; i < domain_size; ++i) { for (int64_t j = 0; j < domain_size; ++j) { auto wk_ijk = (pk3(i, j, k + 1) - pk3(i, j, k)); auto wk_i1jk = (pk3(i + 1, j, k + 1) - pk3(i + 1, j, k)); auto _du = ((dt / (wk_ijk + wk_i1jk)) * (((gz(i, j, k + 1) - gz(i + 1, j, k)) * (pk3(i + 1, j, k + 1) - pk3(i, j, k))) + ((gz(i, j, k) - gz(i + 1, j, k + 1)) * (pk3(i, j, k + 1) - pk3(i + 1, j, k))))); uout(i, j, k) = (((uin(i, j, k) + _du) + ((dt / (wk1(i, j, k) + wk1(i + 1, j, k))) * (((gz(i, j, k + 1) - gz(i + 1, j, k)) * (pp(i + 1, j, k + 1) - pp(i, j, k))) + ((gz(i, j, k) - gz(i + 1, j, k + 1)) * (pp(i, j, k + 1) - pp(i + 1, j, k)))))) * rdx(i, j, k)); } } } for (int64_t k = 0; k < domain_height; ++k) { for (int64_t i = 0; i < domain_size; ++i) { for (int64_t j = 0; j < domain_size; ++j) { auto wk_ijk = (pk3(i, j, k + 1) - pk3(i, j, k)); auto wk_ij1k = (pk3(i, j + 1, k + 1) - pk3(i, j + 1, k)); auto _dv = ((dt / (wk_ijk + wk_ij1k)) * (((gz(i, j, k + 1) - gz(i, j + 1, k)) * (pk3(i, j + 1, k + 1) - pk3(i, j, k))) + ((gz(i, j, k) - gz(i, j + 1, k + 1)) * (pk3(i, j, k + 1) - pk3(i, j + 1, k))))); vout(i, j, k) = (((vin(i, j, k) + _dv) + ((dt / (wk1(i, j, k) + wk1(i, j + 1, k))) * (((gz(i, j, k + 1) - gz(i, j + 1, k)) * (pp(i, j + 1, k + 1) - pp(i, j, k))) + ((gz(i, j, k) - gz(i, j + 1, k + 1)) * (pp(i, j, k + 1) - pp(i, j + 1, k)))))) * rdy(i, j, k)); } } } } void nh_p_grad_openmp(Storage3D& uout, Storage3D& vout, const Storage3D& uin, const Storage3D& vin, const Storage3D& rdx, const Storage3D& rdy, const Storage3D& gz, const Storage3D& pp, const Storage3D& pk3, const Storage3D& wk1, Storage3D& wk, Storage3D& du, Storage3D& dv, const ElementType dt) { #pragma omp parallel for for (int64_t k = 0; k < domain_height; ++k) { for (int64_t i = 0; i < domain_size; ++i) { for (int64_t j = 0; j < domain_size; ++j) { auto wk_ijk = (pk3(i, j, k + 1) - pk3(i, j, k)); auto wk_i1jk = (pk3(i + 1, j, k + 1) - pk3(i + 1, j, k)); auto wk_ij1k = (pk3(i, j + 1, k + 1) - pk3(i, j + 1, k)); auto _du = ((dt / (wk_ijk + wk_i1jk)) * (((gz(i, j, k + 1) - gz(i + 1, j, k)) * (pk3(i + 1, j, k + 1) - pk3(i, j, k))) + ((gz(i, j, k) - gz(i + 1, j, k + 1)) * (pk3(i, j, k + 1) - pk3(i + 1, j, k))))); uout(i, j, k) = (((uin(i, j, k) + _du) + ((dt / (wk1(i, j, k) + wk1(i + 1, j, k))) * (((gz(i, j, k + 1) - gz(i + 1, j, k)) * (pp(i + 1, j, k + 1) - pp(i, j, k))) + ((gz(i, j, k) - gz(i + 1, j, k + 1)) * (pp(i, j, k + 1) - pp(i + 1, j, k)))))) * rdx(i, j, k)); auto _dv = ((dt / (wk_ijk + wk_ij1k)) * (((gz(i, j, k + 1) - gz(i, j + 1, k)) * (pk3(i, j + 1, k + 1) - pk3(i, j, k))) + ((gz(i, j, k) - gz(i, j + 1, k + 1)) * (pk3(i, j, k + 1) - pk3(i, j + 1, k))))); vout(i, j, k) = (((vin(i, j, k) + _dv) + ((dt / (wk1(i, j, k) + wk1(i, j + 1, k))) * (((gz(i, j, k + 1) - gz(i, j + 1, k)) * (pp(i, j + 1, k + 1) - pp(i, j, k))) + ((gz(i, j, k) - gz(i, j + 1, k + 1)) * (pp(i, j, k + 1) - pp(i, j + 1, k)))))) * rdy(i, j, k)); } } } } #endif // NH_P_GRAD_H
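The variants above differ only in scheduling: the baseline materializes wk, du and dv as full temporary fields, while the fused versions recompute the two or three wk values each point needs directly from pk3 and keep du/dv in locals, trading a few extra flops for less memory traffic. A generic timing helper such as the following can be used to compare them; run_timed_ms is a hypothetical name, and the sketch assumes nothing about Storage3D beyond what the calls themselves require:

#include <chrono>
#include <utility>

// Run one of the nh_p_grad variants and return wall-clock milliseconds.
template <typename F, typename... Args>
double run_timed_ms(F&& f, Args&&... args) {
    const auto t0 = std::chrono::steady_clock::now();
    std::forward<F>(f)(std::forward<Args>(args)...);   // execute the kernel once
    const auto t1 = std::chrono::steady_clock::now();
    return std::chrono::duration<double, std::milli>(t1 - t0).count();
}

// e.g. const double ms = run_timed_ms(nh_p_grad_fullfusion, uout, vout, uin, vin,
//                                     rdx, rdy, gz, pp, pk3, wk1, wk, du, dv, dt);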
par_amgdd_fac_cycle.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" HYPRE_Int hypre_BoomerAMGDD_FAC( void *amgdd_vdata, HYPRE_Int first_iteration ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; HYPRE_Int cycle_type = hypre_ParAMGDDDataFACCycleType(amgdd_data); HYPRE_Int start_level = hypre_ParAMGDDDataStartLevel(amgdd_data); if (cycle_type == 1 || cycle_type == 2) { hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, start_level, cycle_type, first_iteration); } else if (cycle_type == 3) { hypre_BoomerAMGDD_FAC_FCycle(amgdd_vdata, first_iteration); } else { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "WARNING: unknown AMG-DD FAC cycle type. Defaulting to 1 (V-cycle).\n"); hypre_ParAMGDDDataFACCycleType(amgdd_data) = 1; hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, start_level, 1, first_iteration); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Cycle( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_type, HYPRE_Int first_iteration ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_ParAMGData *amg_data = hypre_ParAMGDDDataAMG(amgdd_data); hypre_AMGDDCompGrid **compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data); HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data); HYPRE_Int i; // Relax on the real nodes hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, level, 1); // Restrict the residual at all fine points (real and ghost) and set residual at coarse points not under the fine grid if (num_levels > 1) { hypre_BoomerAMGDD_FAC_Restrict(compGrid[level], compGrid[level+1], first_iteration); hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridS(compGrid[level]), 0.0); hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridT(compGrid[level]), 0.0); // Either solve on the coarse level or recurse if (level+1 == num_levels-1) { hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, num_levels-1, 3); } else for (i = 0; i < cycle_type; i++) { hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, level+1, cycle_type, first_iteration); first_iteration = 0; } // Interpolate up and relax hypre_BoomerAMGDD_FAC_Interpolate(compGrid[level], compGrid[level+1]); } hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, level, 2); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_FCycle( void *amgdd_vdata, HYPRE_Int first_iteration ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_ParAMGData *amg_data = hypre_ParAMGDDDataAMG(amgdd_data); hypre_AMGDDCompGrid **compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data); HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data); HYPRE_Int level; // ... work down to coarsest ... if (!first_iteration) { for (level = hypre_ParAMGDDDataStartLevel(amgdd_data); level < num_levels - 1; level++) { hypre_BoomerAMGDD_FAC_Restrict(compGrid[level], compGrid[level+1], 0); hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridS(compGrid[level]), 0.0); hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridT(compGrid[level]), 0.0); } } // ... solve on coarsest level ... hypre_BoomerAMGDD_FAC_Relax(amgdd_vdata, num_levels-1, 3); // ... 
and work back up to the finest for (level = num_levels - 2; level > -1; level--) { // Interpolate up and relax hypre_BoomerAMGDD_FAC_Interpolate(compGrid[level], compGrid[level+1]); // V-cycle hypre_BoomerAMGDD_FAC_Cycle(amgdd_vdata, level, 1, 0); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Interpolate( hypre_AMGDDCompGrid *compGrid_f, hypre_AMGDDCompGrid *compGrid_c ) { hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridP(compGrid_f), hypre_AMGDDCompGridU(compGrid_c), 1.0, hypre_AMGDDCompGridU(compGrid_f)); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Restrict( hypre_AMGDDCompGrid *compGrid_f, hypre_AMGDDCompGrid *compGrid_c, HYPRE_Int first_iteration ) { // Recalculate residual on coarse grid if (!first_iteration) { hypre_AMGDDCompGridMatvec(-1.0, hypre_AMGDDCompGridA(compGrid_c), hypre_AMGDDCompGridU(compGrid_c), 1.0, hypre_AMGDDCompGridF(compGrid_c)); } // Get update: s_l <- A_l t_l + s_l hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridA(compGrid_f), hypre_AMGDDCompGridT(compGrid_f), 1.0, hypre_AMGDDCompGridS(compGrid_f)); // If we need to preserve the updates on the next level if (hypre_AMGDDCompGridS(compGrid_c)) { hypre_AMGDDCompGridMatvec(1.0, hypre_AMGDDCompGridR(compGrid_f), hypre_AMGDDCompGridS(compGrid_f), 0.0, hypre_AMGDDCompGridS(compGrid_c)); // Subtract restricted update from recalculated residual: f_{l+1} <- f_{l+1} - s_{l+1} hypre_AMGDDCompGridVectorAxpy(-1.0, hypre_AMGDDCompGridS(compGrid_c), hypre_AMGDDCompGridF(compGrid_c)); } else { // Restrict and subtract update from recalculated residual: f_{l+1} <- f_{l+1} - P_l^T s_l hypre_AMGDDCompGridMatvec(-1.0, hypre_AMGDDCompGridR(compGrid_f), hypre_AMGDDCompGridS(compGrid_f), 1.0, hypre_AMGDDCompGridF(compGrid_c)); } // Zero out initial guess on coarse grid hypre_AMGDDCompGridVectorSetConstantValues(hypre_AMGDDCompGridU(compGrid_c), 0.0); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Relax( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_param ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; HYPRE_Int numRelax = hypre_ParAMGDDDataFACNumRelax(amgdd_data); HYPRE_Int i; if (hypre_AMGDDCompGridT(compGrid) || hypre_AMGDDCompGridQ(compGrid)) { hypre_AMGDDCompGridVectorCopy(hypre_AMGDDCompGridU(compGrid), hypre_AMGDDCompGridTemp(compGrid)); hypre_AMGDDCompGridVectorScale(-1.0, hypre_AMGDDCompGridTemp(compGrid)); } for (i = 0; i < numRelax; i++) { (*hypre_ParAMGDDDataUserFACRelaxation(amgdd_data))(amgdd_vdata, level, cycle_param); } if (hypre_AMGDDCompGridT(compGrid) || hypre_AMGDDCompGridQ(compGrid)) { hypre_AMGDDCompGridVectorAxpy(1.0, hypre_AMGDDCompGridU(compGrid), hypre_AMGDDCompGridTemp(compGrid)); if (hypre_AMGDDCompGridT(compGrid)) { hypre_AMGDDCompGridVectorAxpy(1.0, hypre_AMGDDCompGridTemp(compGrid), hypre_AMGDDCompGridT(compGrid)); } if (hypre_AMGDDCompGridQ(compGrid)) { hypre_AMGDDCompGridVectorAxpy(1.0, hypre_AMGDDCompGridTemp(compGrid), hypre_AMGDDCompGridQ(compGrid)); } } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_Jacobi( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_param ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid); HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(memory_location); if (exec ==
HYPRE_EXEC_DEVICE) { hypre_BoomerAMGDD_FAC_JacobiDevice(amgdd_vdata, level); } else #endif { hypre_BoomerAMGDD_FAC_JacobiHost(amgdd_vdata, level); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_JacobiHost( void *amgdd_vdata, HYPRE_Int level ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; HYPRE_Real relax_weight = hypre_ParAMGDDDataFACRelaxWeight(amgdd_data); HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid); hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid); hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid); hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid); hypre_CSRMatrix *diag; HYPRE_Int total_real_nodes; HYPRE_Int i, j; // Calculate l1_norms if necessary (right now, I'm just using this vector for the diagonal of A and doing straight ahead Jacobi) if (!hypre_AMGDDCompGridL1Norms(compGrid)) { total_real_nodes = hypre_AMGDDCompGridNumOwnedNodes(compGrid) + hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); hypre_AMGDDCompGridL1Norms(compGrid) = hypre_CTAlloc(HYPRE_Real, total_real_nodes, memory_location); diag = hypre_AMGDDCompGridMatrixOwnedDiag(A); for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++) { for (j = hypre_CSRMatrixI(diag)[i]; j < hypre_CSRMatrixI(diag)[i+1]; j++) { // hypre_AMGDDCompGridL1Norms(compGrid)[i] += fabs(hypre_CSRMatrixData(diag)[j]); if (hypre_CSRMatrixJ(diag)[j] == i) { hypre_AMGDDCompGridL1Norms(compGrid)[i] = hypre_CSRMatrixData(diag)[j]; } } } diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A); for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++) { for (j = hypre_CSRMatrixI(diag)[i]; j < hypre_CSRMatrixI(diag)[i+1]; j++) { // hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] += fabs(hypre_CSRMatrixData(diag)[j]); if (hypre_CSRMatrixJ(diag)[j] == i) { hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] = hypre_CSRMatrixData(diag)[j]; } } } } // Allocate temporary vector if necessary if (!hypre_AMGDDCompGridTemp2(compGrid)) { hypre_AMGDDCompGridTemp2(compGrid) = hypre_AMGDDCompGridVectorCreate(); hypre_AMGDDCompGridVectorInitialize(hypre_AMGDDCompGridTemp2(compGrid), hypre_AMGDDCompGridNumOwnedNodes(compGrid), hypre_AMGDDCompGridNumNonOwnedNodes(compGrid), hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid)); } hypre_AMGDDCompGridVectorCopy(f, hypre_AMGDDCompGridTemp2(compGrid)); hypre_AMGDDCompGridMatvec(-relax_weight, A, u, relax_weight, hypre_AMGDDCompGridTemp2(compGrid)); for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++) { hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u))[i] += hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridTemp2(compGrid)))[i] / hypre_AMGDDCompGridL1Norms(compGrid)[i]; } for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++) { hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u))[i] += hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridTemp2(compGrid)))[i] / hypre_AMGDDCompGridL1Norms(compGrid)[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)]; } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_GaussSeidel( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_param ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid); 
hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid); hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid); hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(A); hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(A); hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A); hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(A); HYPRE_Complex *u_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u)); HYPRE_Complex *u_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u)); HYPRE_Complex *f_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(f)); HYPRE_Complex *f_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(f)); HYPRE_Int i, j; // loop variables HYPRE_Complex diagonal; // placeholder for the diagonal of A // Do Gauss-Seidel relaxation on the owned nodes for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++) { // Initialize u as RHS u_owned_data[i] = f_owned_data[i]; diagonal = 0.0; // Loop over diag entries for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++) { if (hypre_CSRMatrixJ(owned_diag)[j] == i) { diagonal = hypre_CSRMatrixData(owned_diag)[j]; } else { u_owned_data[i] -= hypre_CSRMatrixData(owned_diag)[j] * u_owned_data[ hypre_CSRMatrixJ(owned_diag)[j] ]; } } // Loop over offd entries for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++) { u_owned_data[i] -= hypre_CSRMatrixData(owned_offd)[j] * u_nonowned_data[ hypre_CSRMatrixJ(owned_offd)[j] ]; } // Divide by diagonal if (diagonal == 0.0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_GaussSeidel().\n"); } u_owned_data[i] /= diagonal; } // Do Gauss-Seidel relaxation on the nonowned nodes for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++) { // Initialize u as RHS u_nonowned_data[i] = f_nonowned_data[i]; diagonal = 0.0; // Loop over diag entries for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++) { if (hypre_CSRMatrixJ(nonowned_diag)[j] == i) { diagonal = hypre_CSRMatrixData(nonowned_diag)[j]; } else { u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_diag)[j] * u_nonowned_data[ hypre_CSRMatrixJ(nonowned_diag)[j] ]; } } // Loop over offd entries for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++) { u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_offd)[j] * u_owned_data[ hypre_CSRMatrixJ(nonowned_offd)[j] ]; } // Divide by diagonal if (diagonal == 0.0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_GaussSeidel().\n"); } u_nonowned_data[i] /= diagonal; } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_OrderedGaussSeidel( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_param ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; hypre_AMGDDCompGridMatrix *A = hypre_AMGDDCompGridA(compGrid); hypre_AMGDDCompGridVector *f = hypre_AMGDDCompGridF(compGrid); hypre_AMGDDCompGridVector *u = hypre_AMGDDCompGridU(compGrid); HYPRE_Int unordered_i, i, j; // loop variables HYPRE_Complex diagonal; // placeholder for the diagonal of A if (!hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid)) { hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid) = hypre_CTAlloc(HYPRE_Int, hypre_AMGDDCompGridNumOwnedNodes(compGrid), 
hypre_AMGDDCompGridMemoryLocation(compGrid)); hypre_topo_sort(hypre_CSRMatrixI(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))), hypre_CSRMatrixJ(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))), hypre_CSRMatrixData(hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid))), hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid), hypre_AMGDDCompGridNumOwnedNodes(compGrid)); } if (!hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid)) { hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid) = hypre_CTAlloc(HYPRE_Int, hypre_AMGDDCompGridNumNonOwnedNodes(compGrid), hypre_AMGDDCompGridMemoryLocation(compGrid)); hypre_topo_sort(hypre_CSRMatrixI(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))), hypre_CSRMatrixJ(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))), hypre_CSRMatrixData(hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid))), hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid), hypre_AMGDDCompGridNumNonOwnedNodes(compGrid)); } // Get all the info HYPRE_Complex *u_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(u)); HYPRE_Complex *u_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(u)); HYPRE_Complex *f_owned_data = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(f)); HYPRE_Complex *f_nonowned_data = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(f)); hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(A); hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(A); hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(A); hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(A); // Do Gauss-Seidel relaxation on the nonowned real nodes for (unordered_i = 0; unordered_i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); unordered_i++) { i = hypre_AMGDDCompGridNonOwnedRelaxOrdering(compGrid)[unordered_i]; // Initialize u as RHS u_nonowned_data[i] = f_nonowned_data[i]; diagonal = 0.0; // Loop over diag entries for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++) { if (hypre_CSRMatrixJ(nonowned_diag)[j] == i) { diagonal = hypre_CSRMatrixData(nonowned_diag)[j]; } else { u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_diag)[j] * u_nonowned_data[ hypre_CSRMatrixJ(nonowned_diag)[j] ]; } } // Loop over offd entries for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++) { u_nonowned_data[i] -= hypre_CSRMatrixData(nonowned_offd)[j] * u_owned_data[ hypre_CSRMatrixJ(nonowned_offd)[j] ]; } // Divide by diagonal if (diagonal == 0.0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_OrderedGaussSeidel().\n"); } u_nonowned_data[i] /= diagonal; } // Do Gauss-Seidel relaxation on the owned nodes for (unordered_i = 0; unordered_i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); unordered_i++) { i = hypre_AMGDDCompGridOwnedRelaxOrdering(compGrid)[unordered_i]; // Initialize u as RHS u_owned_data[i] = f_owned_data[i]; diagonal = 0.0; // Loop over diag entries for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++) { if (hypre_CSRMatrixJ(owned_diag)[j] == i) { diagonal = hypre_CSRMatrixData(owned_diag)[j]; } else { u_owned_data[i] -= hypre_CSRMatrixData(owned_diag)[j] * u_owned_data[ hypre_CSRMatrixJ(owned_diag)[j] ]; } } // Loop over offd entries for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++) { u_owned_data[i] -= 
hypre_CSRMatrixData(owned_offd)[j] * u_nonowned_data[ hypre_CSRMatrixJ(owned_offd)[j] ]; } // Divide by diagonal if (diagonal == 0.0) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"WARNING: Divide by zero diagonal in hypre_BoomerAMGDD_FAC_OrderedGaussSeidel().\n"); } u_owned_data[i] /= diagonal; } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_CFL1Jacobi( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int cycle_param ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; HYPRE_MemoryLocation memory_location = hypre_AMGDDCompGridMemoryLocation(compGrid); HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(memory_location); if (exec == HYPRE_EXEC_DEVICE) { if (cycle_param == 1) { hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 1); hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0); } else if (cycle_param == 2) { hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0); hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 1); } else { hypre_BoomerAMGDD_FAC_CFL1JacobiDevice(amgdd_vdata, level, 0); } } else #endif { if (cycle_param == 1) { hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 1); hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0); } else if (cycle_param == 2) { hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0); hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 1); } else { hypre_BoomerAMGDD_FAC_CFL1JacobiHost(amgdd_vdata, level, 0); } } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGDD_FAC_CFL1JacobiHost( void *amgdd_vdata, HYPRE_Int level, HYPRE_Int relax_set ) { hypre_ParAMGDDData *amgdd_data = (hypre_ParAMGDDData*) amgdd_vdata; hypre_AMGDDCompGrid *compGrid = hypre_ParAMGDDDataCompGrid(amgdd_data)[level]; HYPRE_Real relax_weight = hypre_ParAMGDDDataFACRelaxWeight(amgdd_data); hypre_CSRMatrix *owned_diag = hypre_AMGDDCompGridMatrixOwnedDiag(hypre_AMGDDCompGridA(compGrid)); hypre_CSRMatrix *owned_offd = hypre_AMGDDCompGridMatrixOwnedOffd(hypre_AMGDDCompGridA(compGrid)); hypre_CSRMatrix *nonowned_diag = hypre_AMGDDCompGridMatrixNonOwnedDiag(hypre_AMGDDCompGridA(compGrid)); hypre_CSRMatrix *nonowned_offd = hypre_AMGDDCompGridMatrixNonOwnedOffd(hypre_AMGDDCompGridA(compGrid)); HYPRE_Complex *owned_u = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridU(compGrid))); HYPRE_Complex *nonowned_u = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridU(compGrid))); HYPRE_Complex *owned_f = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridF(compGrid))); HYPRE_Complex *nonowned_f = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridF(compGrid))); HYPRE_Real *l1_norms = hypre_AMGDDCompGridL1Norms(compGrid); HYPRE_Int *cf_marker = hypre_AMGDDCompGridCFMarkerArray(compGrid); HYPRE_Complex *owned_tmp; HYPRE_Complex *nonowned_tmp; HYPRE_Int i, j; HYPRE_Real res; /*----------------------------------------------------------------- * Create and initialize Temp2 vector if not done before. 
*-----------------------------------------------------------------*/ if (!hypre_AMGDDCompGridTemp2(compGrid)) { hypre_AMGDDCompGridTemp2(compGrid) = hypre_AMGDDCompGridVectorCreate(); hypre_AMGDDCompGridVectorInitialize(hypre_AMGDDCompGridTemp2(compGrid), hypre_AMGDDCompGridNumOwnedNodes(compGrid), hypre_AMGDDCompGridNumNonOwnedNodes(compGrid), hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid)); } owned_tmp = hypre_VectorData(hypre_AMGDDCompGridVectorOwned(hypre_AMGDDCompGridTemp2(compGrid))); nonowned_tmp = hypre_VectorData(hypre_AMGDDCompGridVectorNonOwned(hypre_AMGDDCompGridTemp2(compGrid))); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++) { owned_tmp[i] = owned_u[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedNodes(compGrid); i++) { nonowned_tmp[i] = nonowned_u[i]; } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < hypre_AMGDDCompGridNumOwnedNodes(compGrid); i++) { if (cf_marker[i] == relax_set) { res = owned_f[i]; for (j = hypre_CSRMatrixI(owned_diag)[i]; j < hypre_CSRMatrixI(owned_diag)[i+1]; j++) { res -= hypre_CSRMatrixData(owned_diag)[j] * owned_tmp[ hypre_CSRMatrixJ(owned_diag)[j] ]; } for (j = hypre_CSRMatrixI(owned_offd)[i]; j < hypre_CSRMatrixI(owned_offd)[i+1]; j++) { res -= hypre_CSRMatrixData(owned_offd)[j] * nonowned_tmp[ hypre_CSRMatrixJ(owned_offd)[j] ]; } owned_u[i] += (relax_weight * res)/l1_norms[i]; } } for (i = 0; i < hypre_AMGDDCompGridNumNonOwnedRealNodes(compGrid); i++) { if (cf_marker[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)] == relax_set) { res = nonowned_f[i]; for (j = hypre_CSRMatrixI(nonowned_diag)[i]; j < hypre_CSRMatrixI(nonowned_diag)[i+1]; j++) { res -= hypre_CSRMatrixData(nonowned_diag)[j] * nonowned_tmp[ hypre_CSRMatrixJ(nonowned_diag)[j] ]; } for (j = hypre_CSRMatrixI(nonowned_offd)[i]; j < hypre_CSRMatrixI(nonowned_offd)[i+1]; j++) { res -= hypre_CSRMatrixData(nonowned_offd)[j] * owned_tmp[ hypre_CSRMatrixJ(nonowned_offd)[j] ]; } nonowned_u[i] += (relax_weight * res)/l1_norms[i + hypre_AMGDDCompGridNumOwnedNodes(compGrid)]; } } return hypre_error_flag; }
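Per selected point, the CFL1 Jacobi host routine above reduces to u_i += w * (f_i - (A u_old)_i) / l1_i, with u_old frozen in the Temp2 vector. A condensed sketch on a plain CSR matrix; the flat CSR layout and the function name are illustrative only, since the real routine splits A into owned/nonowned diag/offd blocks and goes through hypre's accessors:

// One damped Jacobi sweep restricted to the points whose CF marker matches relax_set.
// u_old is a caller-provided copy of u taken before the sweep (the Temp2 role above).
void cfl1_jacobi_sweep(int n, const int* row_ptr, const int* col_idx, const double* vals,
                       const double* f, const double* u_old, double* u,
                       const double* l1_norms, const int* cf_marker,
                       int relax_set, double w) {
    for (int i = 0; i < n; ++i) {
        if (cf_marker[i] != relax_set) continue;  // relax only C points or only F points
        double res = f[i];
        for (int j = row_ptr[i]; j < row_ptr[i + 1]; ++j)
            res -= vals[j] * u_old[col_idx[j]];   // res = f_i - (A u_old)_i
        u[i] += w * res / l1_norms[i];            // scaled by the l1 (or diagonal) norm
    }
}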
move_shallow_water_particle_utility.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Miguel Maso Sotomayor // Pablo Becker // #ifndef KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED #define KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED ///@defgroup MoveShallowWaterParticleUtility ///@brief Utility to move particles on the eulerian mesh with an /// explicit scheme. This is the basic tool of the pfem2 framework // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/checks.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "utilities/math_utils.h" #include "includes/global_pointer_variables.h" #include "processes/node_erase_process.h" #include "utilities/geometry_utilities.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" #include "spatial_containers/bins_dynamic_objects.h" #include "utilities/spatial_containers_configure.h" #include "geometries/triangle_2d_3.h" #include "geometries/triangle_3d_3.h" #include "shallow_water_application_variables.h" #include "shallow_water_particle.h" #include "utilities/parallel_utilities.h" #include "time.h" namespace Kratos { template< unsigned int TDim> class MoveShallowWaterParticleUtility { public: typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; typedef SpatialContainersConfigure<TDim> Configure; typedef typename Configure::PointType PointType; typedef typename Configure::ContainerType ContainerType; typedef typename Configure::IteratorType IteratorType; typedef typename Configure::ResultContainerType ResultContainerType; typedef typename Configure::ResultIteratorType ResultIteratorType; typedef PointerVector< ShallowParticle, ShallowParticle*, std::vector<ShallowParticle*> > ParticlePointerVector; KRATOS_CLASS_POINTER_DEFINITION(MoveShallowWaterParticleUtility); MoveShallowWaterParticleUtility(ModelPart& rModelPart, Parameters rParameters) : mrModelPart(rModelPart), mScalarVar1(&KratosComponents< Variable<double> >::Get( rParameters["convection_scalar_variable"].GetString() ) ), mVectorVar1(&KratosComponents< Variable<array_1d<double,3> > >::Get( rParameters["convection_vector_variable"].GetString() ) ) { KRATOS_TRY std::cout << "Initializing moveparticle utility for scalar transport" << std::endl; Parameters default_parameters( R"( { "convection_scalar_variable" : "HEIGHT", "convection_vector_variable" : "VELOCITY", "maximum_number_of_particles" : 16 } )" ); // Now validate against defaults -- this also ensures no type mismatch rParameters.ValidateAndAssignDefaults(default_parameters); m_scalar_var1_name = rParameters["convection_scalar_variable"].GetString(); m_vector_var1_name = rParameters["convection_vector_variable"].GetString(); mMaxNumberOfParticles = rParameters["maximum_number_of_particles"].GetDouble(); Check(); //storing water and air density and their inverses, just in case it is needed for the streamline integration //loop in elements to change their ID to their position in the array. Easier to get information later. //DO NOT PARALLELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin(); for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii; ielem->SetId(ii+1); } mLastNodeId = (mrModelPart.NodesEnd() - 1)->Id(); //matrix containing the position of the 4/15/45 particles that we will seed at the beginning BoundedMatrix<double, 5*(1+TDim), 3 > pos; BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N; int particle_id=0; mNElems = mrModelPart.Elements().size(); std::cout << " about to resize vectors" << std::endl; //setting the right size to the vector containing the particles assigned to each element //particles vector. this vector contains ALL the particles in the simulation. mParticlesVector.resize(mNElems*mMaxNumberOfParticles); //and this vector contains the current number of particles that are in each element (currently zero) mNumOfParticlesInElems.resize(mNElems); mNumOfParticlesInElems=ZeroVector(mNElems); //when moving the particles, an auxiliary vector is necessary (to store the previous number) mNumOfParticlesInElemsAux.resize(mNElems); //each element will have a list of pointers to all the particles that are inside. //this vector contains the pointers to the vector of (particle) pointers of each element. mVectorOfParticlePointersVectors.resize(mNElems); //int artz; //std::cin >> artz; int i_int=0; //careful! it's not the id, but the position inside the array! std::cout << " about to create particles" << std::endl; //now we seed: LOOP IN ELEMENTS //using loop index, DO NOT parallelize this! mOffset=0; for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++) { ModelPart::ElementsContainerType::iterator i_elem = ielembegin+ii; mVectorOfParticlePointersVectors[ii] = ParticlePointerVector( mMaxNumberOfParticles*2 ); ParticlePointerVector& particle_pointers = mVectorOfParticlePointersVectors[ii]; int & number_of_particles = mNumOfParticlesInElems[ii]; number_of_particles=0; GeometryType& geom = i_elem->GetGeometry(); ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45 //now we seed the particles in the current element for (unsigned int j = 0; j < pos.size1(); j++) { ++particle_id; ShallowParticle& p_particle = mParticlesVector[particle_id-1]; p_particle.Coordinates() = row(pos,j); p_particle.GetEraseFlag()=false; array_1d<double, 3 > & vector1 = p_particle.GetVector1(); double & scalar1 = p_particle.GetScalar1(); noalias(vector1) = ZeroVector(3); scalar1=0.0; for (unsigned int k = 0; k < (TDim+1); k++) { scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(*mScalarVar1); noalias(vector1) += N(j, k) * geom[k].FastGetSolutionStepValue(*mVectorVar1); } particle_pointers(j) = &p_particle; number_of_particles++; } ++i_int; } mNParticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
std::cout << " [Creating particles : " << mNParticles << " particles created]" << std::endl; mParticlePrintingToolInitialized=false; KRATOS_CATCH("") } ~MoveShallowWaterParticleUtility() {} void MountBin() { KRATOS_TRY //copy the elements to a new container, as the list will //be shuffled duringthe construction of the tree ContainerType& rElements = mrModelPart.ElementsArray(); IteratorType it_begin = rElements.begin(); IteratorType it_end = rElements.end(); //const int number_of_elem = rElements.size(); typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) ); paux.swap(mpBinsObjectDynamic); //BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end ); std::cout << " finished mounting Bins" << std::endl; KRATOS_CATCH("") } /// Calculates the mean velocity /** This function computes the mean velocity within an element and the CFL * This variable keeps the courant number aprox 0.1 in each substep * * @see MoveParticle * @see MoveParticleInverseWay */ void CalculateVelOverElemSize() { KRATOS_TRY const double nodal_weight = 1.0/ (1.0 + double (TDim) ); const double dt = mrModelPart.GetProcessInfo()[DELTA_TIME]; block_for_each(mrModelPart.Elements(), [&](Element& rElem){ const GeometryType& geom = rElem.GetGeometry(); array_1d<double, 3 >mean_velocity=ZeroVector(3); for (unsigned int i=0; i != (TDim+1) ; i++) mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY); mean_velocity *= nodal_weight; double cfl_number = dt * norm_2(mean_velocity) / rElem.GetGeometry().Length(); rElem.SetValue(CFL_NUMBER, cfl_number); }); KRATOS_CATCH("") } /// Reset the boundary conditions /** When a variable is fixed this function resets the nodal values * with the previous time step */ void ResetBoundaryConditions() { KRATOS_TRY const auto& vector_var_x = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_X")); const auto& vector_var_y = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_Y")); const auto& vector_var_z = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_Z")); block_for_each(mrModelPart.Nodes(), [&](NodeType& rNode){ if (rNode.IsFixed(*mScalarVar1)) { rNode.FastGetSolutionStepValue(*mScalarVar1)=rNode.GetSolutionStepValue(*mScalarVar1,1); } if (rNode.IsFixed(vector_var_x)) { rNode.FastGetSolutionStepValue(vector_var_x)=rNode.GetSolutionStepValue(vector_var_x,1); } if (rNode.IsFixed(vector_var_y)) { rNode.FastGetSolutionStepValue(vector_var_y)=rNode.GetSolutionStepValue(vector_var_y,1); } if (rNode.IsFixed(vector_var_z)) { rNode.FastGetSolutionStepValue(vector_var_z)=rNode.GetSolutionStepValue(vector_var_z,1); } }); KRATOS_CATCH("") } /// Auxiliary function to compute the "delta variables" /** Delta variables are the difference between two time steps. 
* Its value is used to update the particles' info * * @see CorrectParticlesWithoutMovingUsingDeltaVariables */ void CalculateDeltaVariables() { KRATOS_TRY block_for_each(mrModelPart.Nodes(), [&](NodeType& rNode){ rNode.FastGetSolutionStepValue(DELTA_SCALAR) = rNode.FastGetSolutionStepValue(*mScalarVar1) - rNode.FastGetSolutionStepValue(PROJECTED_SCALAR); noalias(rNode.FastGetSolutionStepValue(DELTA_VECTOR)) = rNode.FastGetSolutionStepValue(*mVectorVar1) - rNode.FastGetSolutionStepValue(PROJECTED_VECTOR); }); KRATOS_CATCH("") } /// Auxiliary function /** This function copies a scalar variable value to the previous time step */ void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY block_for_each(mrModelPart.Nodes(), [&](NodeType& rNode){ rNode.GetSolutionStepValue(OriginVariable,1) = rNode.FastGetSolutionStepValue(OriginVariable); }); KRATOS_CATCH("") } /// Auxiliary function /** This function copies a vector variable value to the previous time step */ void CopyVectorVarToPreviousTimeStep(const Variable<array_1d<double,3>>& OriginVariable, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY block_for_each(mrModelPart.Nodes(), [&](NodeType& rNode){ noalias(rNode.GetSolutionStepValue(OriginVariable,1)) = rNode.FastGetSolutionStepValue(OriginVariable); }); KRATOS_CATCH("") } /// Move all the particles /** This function moves the particles across the streamlines * according to the velocity given by VELOCITY variable. The * movement is performed in nsubsteps, during a total time * of DELTA_TIME * * @see Moveparticle */ void MoveParticles() { KRATOS_TRY const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo(); const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. //moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part) //since it is the only function in the whole procedure that does this, it must alternately use one part and the other.
bool even_timestep; if (offset!=0) even_timestep=false; else even_timestep=true; const int post_offset = mMaxNumberOfParticles * static_cast<int>(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles double delta_t = CurrentProcessInfo[DELTA_TIME]; array_1d<double,TDim+1> N; const unsigned int max_results = 10000; mMaxSubSteps = 10; mMaxSubStepDt = delta_t / static_cast<double>(mMaxSubSteps); unsigned int num_elems = mrModelPart.Elements().size(); IndexPartition<unsigned int>(num_elems).for_each([&](unsigned int ii){ int & number_of_particles = mNumOfParticlesInElems[ii]; mNumOfParticlesInElemsAux[ii] = number_of_particles; mNumOfParticlesInElems[ii] = 0; }); std::cout << "convecting particles" << std::endl; //We move the particles across the fixed mesh, saving the change data into them (using the function MoveParticle) #pragma omp barrier struct TLS { ResultContainerType results; GlobalPointersVector<Element> elements_in_trajectory; }; TLS tls; tls.results.resize(max_results); tls.elements_in_trajectory.resize(20); IndexPartition<unsigned int>(num_elems).for_each(tls, [&](unsigned int i, TLS& rTLS){ auto it_old_element = mrModelPart.ElementsBegin() + i; ParticlePointerVector& old_element_particle_pointers = mVectorOfParticlePointersVectors[i]; if ( (rTLS.results.size()) != max_results ) rTLS.results.resize(max_results); unsigned int number_of_elements_in_trajectory = 0; //excluding the origin one (current one, ielem) for (int ii = 0; ii < mNumOfParticlesInElemsAux[i]; ii++) { ShallowParticle& p_particle = old_element_particle_pointers[offset+ii]; Element::Pointer p_current_element(*it_old_element.base()); ResultIteratorType result_begin = rTLS.results.begin(); bool & erase_flag = p_particle.GetEraseFlag(); if (erase_flag == false){ MoveParticle(p_particle,p_current_element,rTLS.elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //N was removed from the arguments: it is not needed, since the particle ALWAYS starts at a node and we do not care where it ends const int current_element_id = p_current_element->Id(); int & number_of_particles_in_current_elem = mNumOfParticlesInElems[current_element_id-1]; if (number_of_particles_in_current_elem < mMaxNumberOfParticles && erase_flag == false) { ParticlePointerVector& current_element_particle_pointers = mVectorOfParticlePointersVectors[current_element_id-1]; #pragma omp critical { if (number_of_particles_in_current_elem < mMaxNumberOfParticles) // we can't go over this node, there's no room. Otherwise we would be in the position of the first particle of the next element!! { current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &p_particle; number_of_particles_in_current_elem++; KRATOS_ERROR_IF( number_of_particles_in_current_elem > mMaxNumberOfParticles ) << "In move shallow water particle utility: exceeded maximum number of particles" << std::endl; } else { p_particle.GetEraseFlag()=true; //so we just delete it! } } } else { p_particle.GetEraseFlag()=true; //so we just delete it! } } } }); // After having changed everything we change the status of the mOddTimeStep flag: mOffset = post_offset; KRATOS_CATCH("") } /// Transfer particle information to the mesh nodes /** This function explicitly projects data from particles (lagrangian) * onto the eulerian mesh. Shape functions of the elements determine * the particle location within the element and its contribution to * each node as a weighting function.
*/ void TransferLagrangianToEulerian() //explicit { KRATOS_TRY const double threshold = 1e-10 / (static_cast<double>(TDim)+1.0); std::cout << "projecting info to mesh" << std::endl; const int offset = mOffset; // the array of pointers for each element has twice the required size so that // we use a part in odd timesteps and the other in even ones. //(flag managed only by MoveParticles) // We must project data from the particles (lagrangian) onto the mesh (eulerian) // We save data from previous time step of the eulerian mesh in case we must reuse it later // cos no particle was found around the nodes though we could've use a bigger buffer, to be changed later! // after having saved data, we reset them to zero, this way it's easier to add the contribution // of the surrounding particles. block_for_each(mrModelPart.Nodes(), [&](NodeType& rNode){ rNode.FastGetSolutionStepValue(PROJECTED_SCALAR)=0.0; noalias(rNode.FastGetSolutionStepValue(PROJECTED_VECTOR))=ZeroVector(3); rNode.FastGetSolutionStepValue(INTEGRATION_WEIGHT)=0.0; }); // Adding contribution, loop on elements, since each element has stored the particles found inside of it IndexPartition<unsigned int>(mrModelPart.NumberOfElements()).for_each([&](unsigned int ii){ array_1d<double,3*(TDim+1)> nodes_positions; array_1d<double,3*(TDim+1)> nodes_added_vector1 = ZeroVector(3*(TDim+1)); array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1)); array_1d<double,(TDim+1)> nodes_added_weights = ZeroVector((TDim+1)); auto i_elem = mrModelPart.ElementsBegin() + ii; GeometryType& geom = i_elem->GetGeometry(); for (int i=0 ; i!=(TDim+1) ; ++i) { nodes_positions[i*3+0]=geom[i].X(); nodes_positions[i*3+1]=geom[i].Y(); nodes_positions[i*3+2]=geom[i].Z(); } int & number_of_particles_in_elem = mNumOfParticlesInElems[ii]; ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii]; for (int iii=0; iii < number_of_particles_in_elem; iii++ ) { if (iii == mMaxNumberOfParticles) // It means we are out of our portion of the array, abort loop! break; ShallowParticle& p_particle = element_particle_pointers[offset+iii]; if (p_particle.GetEraseFlag() == false) { array_1d<double,3> & position = p_particle.Coordinates(); const double& particle_scalar1 = p_particle.GetScalar1(); const array_1d<double,3>& particle_vector1 = p_particle.GetVector1(); array_1d<double,TDim+1> N; bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N); if (is_found == false) // Something went wrong. if it was close enough to the edge we simply send it inside the element. 
{ KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl; for (int j=0 ; j!=(TDim+1); j++) if (N[j] < 0.0 && N[j] > -1e-5) N[j] = 1e-10; } for (int j = 0 ; j != TDim+1; j++) //going through the 3/4 nodes of the element { // These lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions double weight = N(j)*N(j); if (weight < threshold) weight=1e-10; nodes_added_weights[j] += weight; nodes_added_scalar1[j] += weight*static_cast<double>(particle_scalar1); for (int k = 0 ; k != TDim; k++) //x,y,(z) { nodes_added_vector1[j*3+k] += weight * static_cast<double>(particle_vector1[k]); } } } } for (int i = 0 ; i != TDim+1; ++i) { geom[i].SetLock(); geom[i].FastGetSolutionStepValue(PROJECTED_SCALAR) += nodes_added_scalar1[i]; geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR_X) += nodes_added_vector1[3*i+0]; geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR_Y) += nodes_added_vector1[3*i+1]; geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR_Z) += nodes_added_vector1[3*i+2]; geom[i].FastGetSolutionStepValue(INTEGRATION_WEIGHT) += nodes_added_weights[i]; geom[i].UnSetLock(); } }); block_for_each(mrModelPart.Nodes(), [&](NodeType& rNode){ double sum_weights = rNode.FastGetSolutionStepValue(INTEGRATION_WEIGHT); if (sum_weights > 0.00001) { double & scalar = rNode.FastGetSolutionStepValue(PROJECTED_SCALAR); array_1d<double,3> & vector = rNode.FastGetSolutionStepValue(PROJECTED_VECTOR); scalar /= sum_weights; vector /= sum_weights; } else // This should never happen because other ways to recover the information have been executed before, but leaving it just in case.. { rNode.FastGetSolutionStepValue(PROJECTED_SCALAR)=rNode.FastGetSolutionStepValue(*mScalarVar1,1); noalias(rNode.FastGetSolutionStepValue(PROJECTED_VECTOR))=rNode.FastGetSolutionStepValue(*mVectorVar1,1); } }); KRATOS_CATCH("") } /// Update all the particles without moving them /** This function updates all the particles variables using the * "delta variables" from the nodal database. * * @see CorrectParticleUsingDeltaVariables */ void CorrectParticlesWithoutMovingUsingDeltaVariables() { KRATOS_TRY const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones. //(flag managed only by MoveParticles) auto i_elem_begin = mrModelPart.ElementsBegin(); IndexPartition<unsigned int>(mrModelPart.NumberOfElements()).for_each([&](unsigned int i){ auto ielem = i_elem_begin + i; Element::Pointer p_element(*ielem.base()); GeometryType& geom = ielem->GetGeometry(); int & number_of_particles_in_elem= mNumOfParticlesInElems[i]; ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[i]; for (int iii = 0; iii < number_of_particles_in_elem ; iii++) { if (iii > mMaxNumberOfParticles) //it means we are out of our portion of the array, abort loop! break; ShallowParticle& p_particle = element_particle_pointers[offset+iii]; bool erase_flag= p_particle.GetEraseFlag(); if (erase_flag == false) { CorrectParticleUsingDeltaVariables(p_particle, p_element, geom); //'lite' version, we pass by reference the geometry, so much cheaper } } }); KRATOS_CATCH("") } /// Fill an element with particles /** This function is to be executed after moving particles and * before tranferring data from lagrangian particles to eulerian mesh * If an element finishes with less particles than "minimum number * of particles", then PreReseed adds particles inside it. 
* A minimal reseed is performed in order to not disturb the projection * from lagrangian to eulerian. * * @see MinimumNumberOfParticles * * @see MoveParticles * @see MoveParticleInverseWay: is called to get the particle values */ void PreReseed(int MinimumNumberOfParticles) { KRATOS_TRY const int offset = mOffset; const int max_results = 1000; struct TLS { ResultContainerType results; unsigned int free_particle = 0; //we start with the first position in the particles array }; TLS tls; tls.results.resize(max_results); auto it_elem_begin = mrModelPart.ElementsBegin(); unsigned int num_elems = mrModelPart.NumberOfElements(); IndexPartition<unsigned int>(num_elems).for_each(tls, [&](unsigned int ii, TLS& rTLS){ auto it_elem = it_elem_begin + ii; if (rTLS.results.size() != max_results) rTLS.results.resize(max_results); int & number_of_particles_in_elem = mNumOfParticlesInElems[ii]; ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii]; if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (it_elem->GetGeometry())[0].Y()<0.10 ) { BoundedMatrix<double, TDim+1, 3> pos; BoundedMatrix<double, TDim+1, TDim+1> N; GeometryType& geom = it_elem->GetGeometry(); ComputeGaussPointPositionsForPreReseed(geom, pos, N); for (unsigned int j = 0; j < (pos.size1()); j++) // I am dropping the last one, the one in the middle of the element { bool keep_looking = true; while(keep_looking) { if (mParticlesVector[rTLS.free_particle].GetEraseFlag()==true) { #pragma omp critical { if (mParticlesVector[rTLS.free_particle].GetEraseFlag()==true) { mParticlesVector[rTLS.free_particle].GetEraseFlag()=false; keep_looking=false; } } if (keep_looking==false) break; else rTLS.free_particle++; } else rTLS.free_particle++; } ShallowParticle p_particle(pos(j,0), pos(j,1), pos(j,2)); array_1d<double,TDim+1> aux_N; bool is_found = CalculatePosition(geom, pos(j,0), pos(j,1), pos(j,2), aux_N); KRATOS_ERROR_IF_NOT( is_found ) << "In move shallow water particle utility: particle not found in domain" << std::endl; p_particle.GetEraseFlag()=false; ResultIteratorType result_begin = rTLS.results.begin(); Element::Pointer p_element(*it_elem.base()); MoveParticleInverseWay(p_particle, p_element, result_begin, max_results); //and we copy it to the array: mParticlesVector[rTLS.free_particle] = p_particle; element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[rTLS.free_particle]; p_particle.GetEraseFlag()=false; number_of_particles_in_elem++; } } }); KRATOS_CATCH("") } /// Fill an element with particles /** This function is to be executed after the mesh stage solver is * called and the particles are updated. * If an element contains less particles than "minimum number of * particles", then PostReseed adds particles inside it. 
* A full reseed is performed and the particle gets it's convected * variables directly from the eulerian mesh * * @param MinimumNumberOfParticles * * @see PreReseed */ void PostReseed(int MinimumNumberOfParticles) //pooyan's way { KRATOS_TRY const int offset = mOffset; unsigned int free_particle = 0; //we start by the first position; auto it_elem_begin = mrModelPart.ElementsBegin(); unsigned int num_elems = mrModelPart.NumberOfElements(); IndexPartition<unsigned int>(num_elems).for_each(free_particle, [&](unsigned int ii, unsigned int FreeParticleTLS){ auto it_elem = it_elem_begin + ii; int & number_of_particles_in_elem = mNumOfParticlesInElems[ii]; ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii]; GeometryType& geom = it_elem->GetGeometry(); if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(MinimumNumberOfParticles) ) ) { BoundedMatrix<double, 3+2*TDim, 3> pos; //7 particles (2D) or 9 particles (3D) BoundedMatrix<double, 3+2*TDim, TDim+1> N; ComputeGaussPointPositionsForPostReseed(geom, pos, N); unsigned int number_of_reseeded_particles = 3 + 2*TDim; for (unsigned int j = 0; j < number_of_reseeded_particles; j++) { // Now we have to find an empty space (a particle that was about to be deleted) in the // particles model part. once found. there will be our renewed particle: bool keep_looking = true; while(keep_looking) { if (mParticlesVector[FreeParticleTLS].GetEraseFlag()==true) { #pragma omp critical { if (mParticlesVector[FreeParticleTLS].GetEraseFlag()==true) { mParticlesVector[FreeParticleTLS].GetEraseFlag()=false; keep_looking=false; } } if (keep_looking==false) break; else FreeParticleTLS++; } else FreeParticleTLS++; } ShallowParticle p_particle(pos(j,0), pos(j,1), pos(j,2)); array_1d<double,TDim+1> aux_N; bool is_found = CalculatePosition(geom, pos(j,0), pos(j,1), pos(j,2), aux_N); KRATOS_ERROR_IF_NOT(is_found) << "In move shallow water particle utility: particle not found in domain" << std::endl; double mesh_scalar1 = 0.0; array_1d<double,3> mesh_vector1 = ZeroVector(3); for (unsigned int l = 0; l < (TDim+1); l++) { mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(*mScalarVar1); noalias(mesh_vector1) += N(j, l) * geom[l].FastGetSolutionStepValue(*mVectorVar1); } p_particle.GetScalar1() = mesh_scalar1; p_particle.GetVector1() = mesh_vector1; p_particle.GetEraseFlag() = false; mParticlesVector[FreeParticleTLS] = p_particle; element_particle_pointers(offset + number_of_particles_in_elem) = &mParticlesVector[FreeParticleTLS]; number_of_particles_in_elem++; KRATOS_ERROR_IF(keep_looking) << "In move shallow water particle utility: Finished the list and couldnt find a free cell for the new particle!" 
<< std::endl; } } }); KRATOS_CATCH("") } /// Fill a model part with particles /** This function prints the particles to a model part * * @param rLagrangianModelPart: empty model part to print particles * @param FilterFactor: the function will print one particle of every "filter factor" */ void ExecuteParticlesPrintingTool( ModelPart& rLagrangianModelPart, unsigned int FilterFactor ) { KRATOS_TRY // We will only print one out of every "filter factor" particles of the total particle list if (mParticlePrintingToolInitialized == false) { KRATOS_ERROR_IF( rLagrangianModelPart.NodesBegin() - rLagrangianModelPart.NodesEnd() > 0 ) << "In move shallow water particle utility: an empty model part is required for the particles printing tool" << std::endl; rLagrangianModelPart.AddNodalSolutionStepVariable(*mScalarVar1); rLagrangianModelPart.AddNodalSolutionStepVariable(DISPLACEMENT); for (unsigned int i = 0; i != ((mMaxNumberOfParticles*mNElems)/FilterFactor) + FilterFactor; i++) { Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode( i+mLastNodeId+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!! pnode->SetBufferSize(1); } mParticlePrintingToolInitialized=true; } // Resetting data of the unused particles const double inactive_particle_position = -10.0; array_1d<double,3>inactive_particle_position_vector; inactive_particle_position_vector(0)=inactive_particle_position; inactive_particle_position_vector(1)=inactive_particle_position; inactive_particle_position_vector(2)=inactive_particle_position; ModelPart::NodesContainerType::iterator inodebegin = rLagrangianModelPart.NodesBegin(); for(unsigned int ii = 0; ii < rLagrangianModelPart.Nodes().size(); ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->FastGetSolutionStepValue(*mScalarVar1) = 0.0; inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector; } int counter = 0; for (int i = 0; i != mMaxNumberOfParticles*mNElems; i++) { ShallowParticle& p_particle = mParticlesVector[i]; if(p_particle.GetEraseFlag() == false && i%FilterFactor == 0) { ModelPart::NodesContainerType::iterator inode = inodebegin + counter; //copying info from the particle to the (printing) node. inode->FastGetSolutionStepValue(*mScalarVar1) = p_particle.GetScalar1(); inode->FastGetSolutionStepValue(DISPLACEMENT) = p_particle.Coordinates(); counter++; } } KRATOS_CATCH("") } protected: private: /// Move a particle /** this function moves a particle according to the velocity given * by VELOCITY variable. The movement is performed in nsubsteps, * during a total time of DELTA_TIME * * @param pParticle * @param pElement * @param rElementsInTrajectory * @param rNumberOfElementsInTrajectory * @param ResultBegin * @param MaxNumberOfResults * * @see MoveParticles */ void MoveParticle(ShallowParticle & pParticle, Element::Pointer & pElement, GlobalPointersVector< Element >& rElementsInTrajectory, unsigned int & rNumberOfElementsInTrajectory, ResultIteratorType ResultBegin, const unsigned int MaxNumberOfResults) { const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; unsigned int nsubsteps; double substep_dt; bool keep_integrating = false; bool is_found; array_1d<double,3> vel; array_1d<double,3> vel_without_other_phase_nodes = ZeroVector(3); array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. 
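//Sub-stepping scheme used below: nsubsteps = 10 * CFL_NUMBER of the current element (at least 1), so each forward Euler substep position += vel * (delta_t / nsubsteps) advances the particle at a substep Courant number of roughly 0.1.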
position = pParticle.Coordinates(); //initial coordinates double only_integral = 0.0 ; is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { keep_integrating = true; GeometryType& geom = pElement->GetGeometry();//the element we're in vel = ZeroVector(3); for(unsigned int j = 0; j< TDim+1; j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; } //calculating substep to get +- courant(substep) = 0.1 nsubsteps = 10.0 * pElement->GetValue(CFL_NUMBER); if (nsubsteps < 1) nsubsteps = 1; substep_dt = delta_t / double(nsubsteps); only_integral = 1.0;// weight;//*double(nsubsteps); position += vel*substep_dt;//weight; // DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY unsigned int check_from_element_number = 0; for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle. { if (keep_integrating == true) { is_found = FindNodeOnMesh(position, N, pElement, rElementsInTrajectory, rNumberOfElementsInTrajectory, check_from_element_number, ResultBegin, MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { GeometryType& geom = pElement->GetGeometry();//the element we're in vel = ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; } only_integral += 1.0; //values saved for the current time step position += vel * substep_dt;//weight; } else { keep_integrating = false; break; } } else break; } } if (keep_integrating == false) (pParticle.GetEraseFlag()=true); else is_found = FindNodeOnMesh(position, N ,pElement,ResultBegin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pElement) if (is_found == false) ( pParticle.GetEraseFlag()=true); pParticle.Coordinates() = position; } /// This function updates a particle /** This function updates a particle variables using the "delta * variables" from the nodal database. * * @param pParticle * @param pElement * @param rGeom * * @see CorrectParticlesWithoutMovingUsingDeltaVariables */ void CorrectParticleUsingDeltaVariables(ShallowParticle & pParticle, Element::Pointer & pElement, GeometryType& rGeom) { array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. array_1d<double,3> coords = pParticle.Coordinates(); double & particle_scalar1 = pParticle.GetScalar1(); array_1d<double,3> & particle_vector1 = pParticle.GetVector1(); //double distance=0.0; double delta_scalar1 = 0.0; array_1d<double,3> delta_vector1 = ZeroVector(3); bool is_found = CalculatePosition(rGeom,coords[0],coords[1],coords[2],N); if(is_found == false) { KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl; for (int j = 0 ; j != TDim+1; j++) if (N[j] < 0.0 ) N[j] = 1e-10; } for(unsigned int j=0; j<(TDim+1); j++) { delta_scalar1 += rGeom[j].FastGetSolutionStepValue(DELTA_SCALAR)*N[j]; noalias(delta_vector1) += rGeom[j].FastGetSolutionStepValue(DELTA_VECTOR)*N[j]; } particle_scalar1 = particle_scalar1 + delta_scalar1; particle_vector1 = particle_vector1 + delta_vector1; } /// Move a particle in the inverse way /** this function moves a particle according to the -velocity given * by VELOCITY variable. 
The movement is performed by a backward * integration in nsubsteps, during a total time of DELTA_TIME * Before the particle goes out of the element, gets the value * of the eulerian mesh and stores it * * @param pParticle * @param pElement * @param ResultBegin * @param MaxNumberOfResults * * @see PreReseed */ void MoveParticleInverseWay(ShallowParticle & pParticle, Element::Pointer & pElement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO! ResultIteratorType ResultBegin, const unsigned int MaxNumberOfResults) { const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; unsigned int nsubsteps; double substep_dt; bool keep_integrating = false; bool is_found; double scalar1 = 0.0; array_1d<double,3> vector1; array_1d<double,3> vel; array_1d<double,3> position; array_1d<double,3> mid_position; array_1d<double,TDim+1> N; //we start with the first position, then it will enter the loop. position = pParticle.Coordinates(); // + (pParticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates double only_integral = 0.0 ; is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is: if(is_found == true) { keep_integrating = true; GeometryType& geom = pElement->GetGeometry(); //the element we're in scalar1 = 0.0; vector1 = ZeroVector(3); vel = ZeroVector(3); for(unsigned int j = 0; j < TDim+1; j++) { scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1) * N[j]; noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1) * N[j]; noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY) * N[j]; } //calculating substep to get +- courant(substep) = 1/4 nsubsteps = 10.0 * pElement->GetValue(CFL_NUMBER); if (nsubsteps < 1) nsubsteps = 1; substep_dt = delta_t / double(nsubsteps); only_integral = 1.0; // weight;//*double(nsubsteps); position -= vel*substep_dt; //weight; for(unsigned int i = 0; i < nsubsteps-1; i++) // this is for the substeps n+1. in the first one we already knew the position of the particle. { if (keep_integrating == true) { is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is: if (is_found == true) { GeometryType& geom = pElement->GetGeometry();//the element we're in scalar1 = 0.0; vector1 = ZeroVector(3); vel = ZeroVector(3); for(unsigned int j=0; j<(TDim+1); j++) { scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N(j); noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j]; noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j]; } only_integral += 1.0; //weight ; //values saved for the current time step position -= vel*substep_dt; //weight; } else keep_integrating = false; } } pParticle.GetScalar1() = scalar1; pParticle.GetVector1() = vector1; } } /// Find the element into which a given node is located /** This function should find the element into which a given node * is located and return a pointer to the element and the vector * containing the shape functions that define the positions within * the element. 
* If false is returned the element is not found * * @param position of the node * @param N return shape functions that define the positions within the elem * @param pElement: return a pointer to the element * @param ResultBegin * @param MaxNumberOfResults * @return FindNodeOnMesh if the element is found of not * * @see CalculatePosition */ bool FindNodeOnMesh( const array_1d<double,3>& rPosition, array_1d<double,TDim+1>& N, Element::Pointer & pElement, ResultIteratorType ResultBegin, const unsigned int MaxNumberOfResults) { typedef std::size_t SizeType; array_1d<double,TDim+1> aux_N; //before using the bin to search for possible elements we check first the last element in which the particle was. GeometryType& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry(); bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N); if (is_found_1) //that was easy! { return true; } // To begin with we check the neighbour elements; it is a bit more expensive GlobalPointersVector<Element>& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS); for (unsigned int i = 0; i != neighb_elems.size(); i++) { GeometryType& geom = neighb_elems[i].GetGeometry(); bool is_found_2 = CalculatePosition(geom, rPosition[0], rPosition[1], rPosition[2], N); if (is_found_2) { pElement = neighb_elems[i].shared_from_this(); return true; } } // If checking all the neighbour elements did not work, we have to use the bins // ask to the container for the list of candidate elements SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults ); if (results_found>0) { //loop over the candidate elements and check if the particle falls within for(SizeType i = 0; i < results_found; i++) { GeometryType& geom = (*(ResultBegin + i))->GetGeometry(); //find local position bool is_found_3 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N); if (is_found_3) { pElement = (*(ResultBegin + i))->shared_from_this(); return true; } } } //if nothing worked, then: //not found case return false; } /// Find the element into which a given node is located /** This function should find the element into which a given node * is located and return a pointer to the element and the vector * containing the shape functions that define the positions within * the element. * If false is returned the element is not found * This version includes predefined elements following a trajectory * * @param rPosition of the node * @param N Output shape functions that define the positions within the elem * @param pElement Output a pointer to the element * @param rElementsInTrajectory * @param rNumberOfElementsInTrajectory Output * @param CheckFromElementNumber * @param ResultBegin * @param MaxNumberOfResults * @return FindNodeOnMesh if the element is found of not * * @see CalculatePosition */ bool FindNodeOnMesh( const array_1d<double,3>& rPosition, array_1d<double,TDim+1>& N, Element::Pointer & pElement, GlobalPointersVector< Element >& rElementsInTrajectory, unsigned int & rNumberOfElementsInTrajectory, unsigned int & rCheckFromElementNumber, ResultIteratorType ResultBegin, const unsigned int MaxNumberOfResults) { typedef std::size_t SizeType; //~ const array_1d<double,3>& coords = rPosition; array_1d<double,TDim+1> aux_N; //before using the bin to search for possible elements we check first the last element in which the particle was. 
GeometryType& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry(); bool is_found_1 = CalculatePosition(geom_default, rPosition[0], rPosition[1], rPosition[2], N); if(is_found_1 == true) { return true; //that was easy! } // If it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element. for (unsigned int i=(rCheckFromElementNumber);i!=rNumberOfElementsInTrajectory;i++) { GeometryType& geom = rElementsInTrajectory[i].GetGeometry(); bool is_found_2 = CalculatePosition(geom, rPosition[0], rPosition[1], rPosition[2], aux_N); if (is_found_2) { pElement = rElementsInTrajectory[i].shared_from_this(); N = aux_N; rCheckFromElementNumber = i+1 ; //now i element matches pElement, so to avoid cheching twice the same element we send the counter to the following element. return true; } } // Now we check the neighbour elements: GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS); for (unsigned int i=0;i!=(neighb_elems.size());i++) { GeometryType& geom = neighb_elems[i].GetGeometry(); bool is_found_2 = CalculatePosition(geom, rPosition[0], rPosition[1], rPosition[2], N); if (is_found_2) { pElement = neighb_elems[i].shared_from_this(); if (rNumberOfElementsInTrajectory < 20) { rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement; rNumberOfElementsInTrajectory++; rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list } return true; } } // If checking all the neighbour elements did not work, we have to use the bins // ask to the container for the list of candidate elements SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults ); if(results_found>0) { //loop over the candidate elements and check if the particle falls within for(SizeType i = 0; i< results_found; i++) { GeometryType& geom = (*(ResultBegin + i))->GetGeometry(); //find local position bool is_found = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N); if (is_found) { pElement = (*(ResultBegin + i))->shared_from_this(); if (rNumberOfElementsInTrajectory < 20) { rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement; rNumberOfElementsInTrajectory++; rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. 
we are the particle that is adding elements to the list } return true; } } } //not found case return false; } /// Calculate the position of a given particle inside an element /** This function calculates the position of a given particle inside * an element and returns the shape functions that define it position * within the element and returns false if the particle is otuside * the element * * @param rGeom: the element (a triangle) * @param xc: the postition of the particle * @param yc: the postition of the particle * @param zc: the postition of the particle * @param N: the shape functions to define the particle position * * @return CalculatePosition */ inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom, const double xc, const double yc, const double zc, array_1d<double,3> & N ) { double x0 = rGeom[0].X(); double y0 = rGeom[0].Y(); double x1 = rGeom[1].X(); double y1 = rGeom[1].Y(); double x2 = rGeom[2].X(); double y2 = rGeom[2].Y(); double area = CalculateVol(x0, y0, x1, y1, x2, y2); KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl; double inv_area = 1.0 / area; N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } /// Calculate the position of a given particle inside an element /** This function calculates the position of a given particle inside * an element and returns the shape functions that define it position * within the element and returns false if the particle is otuside * the element * * @param rNodesPositions of the element (a triangle) * @param xc: the postition of the particle * @param yc: the postition of the particle * @param zc: the postition of the particle * @param N: the shape functions to define the particle position * * @return CalculatePosition */ inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions, const double xc, const double yc, const double zc, array_1d<double,3> & N ) { const double& x0 = rNodesPositions[0]; const double& y0 = rNodesPositions[1]; const double& x1 = rNodesPositions[3]; const double& y1 = rNodesPositions[4]; const double& x2 = rNodesPositions[6]; const double& y2 = rNodesPositions[7]; double area = CalculateVol(x0, y0, x1, y1, x2, y2); KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl; double inv_area = 1.0 / area; N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area; N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area; N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } /// Calculate the position of a given particle inside an element /** This function calculates the position of a given particle inside * an element and returns the shape functions that define it position * within the element and returns false if the particle is otuside * the element * * @param rGeom: the element (a tetrahedron) * @param xc: the postition of the particle * @param yc: the postition of the particle * @param zc: the postition of the particle * @param N: the shape functions to define the particle position * * @return CalculatePosition 
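 * Note: the returned N are the barycentric coordinates of the particle,
 * each N_i being the ratio of a sub-tetrahedron volume (the element with
 * one vertex replaced by the particle position) to the element volume;
 * the particle lies inside the element iff every N_i is in [0, 1].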
*/ inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { double x0 = rGeom[0].X(); double y0 = rGeom[0].Y(); double z0 = rGeom[0].Z(); double x1 = rGeom[1].X(); double y1 = rGeom[1].Y(); double z1 = rGeom[1].Z(); double x2 = rGeom[2].X(); double y2 = rGeom[2].Y(); double z2 = rGeom[2].Z(); double x3 = rGeom[3].X(); double y3 = rGeom[3].Y(); double z3 = rGeom[3].Z(); double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl; double inv_vol = 1.0 / vol; N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } /// Calculate the position of a given particle inside an element /** This function calculates the position of a given particle inside * an element and returns the shape functions that define it position * within the element and returns false if the particle is otuside * the element * * @param rNodesPositions of the element (a tetrahedron) * @param xc: the postition of the particle * @param yc: the postition of the particle * @param zc: the postition of the particle * @param N: the shape functions to define the particle position * * @return CalculatePosition */ inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions, const double xc, const double yc, const double zc, array_1d<double, 4 > & N ) { const double& x0 = rNodesPositions[0]; const double& y0 = rNodesPositions[1]; const double& z0 = rNodesPositions[2]; const double& x1 = rNodesPositions[3]; const double& y1 = rNodesPositions[4]; const double& z1 = rNodesPositions[5]; const double& x2 = rNodesPositions[6]; const double& y2 = rNodesPositions[7]; const double& z2 = rNodesPositions[8]; const double& x3 = rNodesPositions[9]; const double& y3 = rNodesPositions[10]; const double& z3 = rNodesPositions[11]; double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3); KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl; double inv_vol = 1.0 / vol; N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol; N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol; N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol; N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol; if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } /// Calculate the volume /** This function computes the area of a triangle */ inline double CalculateVol( const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0)); } /// Calculate the volume /** This function computes the volume of a tetrahedron */ inline double CalculateVol( const double x0, const 
double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ * 0.1666666666666666666667; } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_4( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos, BoundedMatrix<double, 7, 3 > & N ) { double one_third = 1.0 / 3.0; double one_sixt = 0.15; //1.0 / 6.0; double two_third = 0.7; //2.0 * one_third; N(0, 0) = one_sixt; N(0, 1) = one_sixt; N(0, 2) = two_third; N(1, 0) = two_third; N(1, 1) = one_sixt; N(1, 2) = one_sixt; N(2, 0) = one_sixt; N(2, 1) = two_third; N(2, 2) = one_sixt; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; //first pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X(); pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y(); pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z(); //second pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X(); pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y(); pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z(); //third pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X(); pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y(); pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); } /// Compute the Gauss points /** For a triangle * * @see PostReseed */ void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos, BoundedMatrix<double, 7, 3 > & N ) //2d { double one_third = 1.0 / 3.0; double one_eight = 0.12; //1.0 / 6.0; double three_quarters = 0.76; //2.0 * one_third; N(0, 0) = one_eight; N(0, 1) = one_eight; N(0, 2) = three_quarters; N(1, 0) = three_quarters; N(1, 1) = one_eight; N(1, 2) = one_eight; N(2, 0) = one_eight; N(2, 1) = three_quarters; N(2, 2) = one_eight; N(3, 0) = one_third; N(3, 1) = one_third; N(3, 2) = one_third; N(4, 0) = one_eight; N(4, 1) = 0.44; N(4, 2) = 0.44; N(5, 0) = 0.44; N(5, 1) = one_eight; N(5, 2) = 0.44; N(6, 0) = 0.44; N(6, 1) = 0.44; N(6, 2) = one_eight; //first pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X(); pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y(); pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z(); //second pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X(); pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y(); pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z(); //third pos(2, 0) = one_eight * geom[0].X() 
+ three_quarters * geom[1].X() + one_eight * geom[2].X(); pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y(); pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z(); //fourth pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X(); pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y(); pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z(); //fifth pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X(); pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y(); pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z(); //sixth pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X(); pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y(); pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z(); //seventh pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X(); pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y(); pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z(); } /// Compute the Gauss points /** For a tetrahedron * * @see PostReseed */ void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos, BoundedMatrix<double, 9, 4 > & N ) //3D { double one_quarter = 0.25; double small_fraction = 0.1; //1.0 / 6.0; double big_fraction = 0.7; //2.0 * one_third; double mid_fraction = 0.3; //2.0 * one_third; N(0, 0) = big_fraction; N(0, 1) = small_fraction; N(0, 2) = small_fraction; N(0, 3) = small_fraction; N(1, 0) = small_fraction; N(1, 1) = big_fraction; N(1, 2) = small_fraction; N(1, 3) = small_fraction; N(2, 0) = small_fraction; N(2, 1) = small_fraction; N(2, 2) = big_fraction; N(2, 3) = small_fraction; N(3, 0) = small_fraction; N(3, 1) = small_fraction; N(3, 2) = small_fraction; N(3, 3) = big_fraction; N(4, 0) = one_quarter; N(4, 1) = one_quarter; N(4, 2) = one_quarter; N(4, 3) = one_quarter; N(5, 0) = small_fraction; N(5, 1) = mid_fraction; N(5, 2) = mid_fraction; N(5, 3) = mid_fraction; N(6, 0) = mid_fraction; N(6, 1) = small_fraction; N(6, 2) = mid_fraction; N(6, 3) = mid_fraction; N(7, 0) = mid_fraction; N(7, 1) = mid_fraction; N(7, 2) = small_fraction; N(7, 3) = mid_fraction; N(8, 0) = mid_fraction; N(8, 1) = mid_fraction; N(8, 2) = mid_fraction; N(8, 3) = small_fraction; pos=ZeroMatrix(9,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=9; j++) //going through the 9 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } /// Compute the Gauss points /** For a triangle * * @see PreReseed */ void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos, BoundedMatrix<double, 3, 3 > & N ) //2D { N(0, 0) = 0.5; N(0, 1) = 0.25; N(0, 2) = 0.25; N(1, 0) = 0.25; N(1, 1) = 0.5; N(1, 2) = 0.25; N(2, 0) = 0.25; N(2, 1) = 0.25; N(2, 2) = 0.5; //first pos(0, 0) = 0.5 * geom[0].X() + 0.25 * geom[1].X() + 0.25 * geom[2].X(); pos(0, 1) = 0.5 * geom[0].Y() + 0.25 * geom[1].Y() + 0.25 * geom[2].Y(); pos(0, 2) = 0.5 * geom[0].Z() + 0.25 * geom[1].Z() + 0.25 * geom[2].Z(); //second pos(1, 0) = 0.25 * geom[0].X() + 0.5 * geom[1].X() + 0.25 * geom[2].X(); pos(1, 1) = 0.25 * geom[0].Y() + 0.5 * 
geom[1].Y() + 0.25 * geom[2].Y(); pos(1, 2) = 0.25 * geom[0].Z() + 0.5 * geom[1].Z() + 0.25 * geom[2].Z(); //third pos(2, 0) = 0.25 * geom[0].X() + 0.25 * geom[1].X() + 0.5 * geom[2].X(); pos(2, 1) = 0.25 * geom[0].Y() + 0.25 * geom[1].Y() + 0.5 * geom[2].Y(); pos(2, 2) = 0.25 * geom[0].Z() + 0.25 * geom[1].Z() + 0.5 * geom[2].Z(); } /// Compute the Gauss points /** For a tetrahedron * * @see PreReseed */ void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos, BoundedMatrix<double, 4, 4 > & N ) //3D { //creating 4 particles, each will be closer to a node and equidistant to the other nodes N(0, 0) = 0.4; N(0, 1) = 0.2; N(0, 2) = 0.2; N(0, 3) = 0.2; N(1, 0) = 0.2; N(1, 1) = 0.4; N(1, 2) = 0.2; N(1, 3) = 0.2; N(2, 0) = 0.2; N(2, 1) = 0.2; N(2, 2) = 0.4; N(2, 3) = 0.2; N(3, 0) = 0.2; N(3, 1) = 0.2; N(3, 2) = 0.2; N(3, 3) = 0.4; pos=ZeroMatrix(4,3); for (unsigned int i=0; i!=4; i++) //going through the 4 nodes { array_1d<double, 3 > & coordinates = geom[i].Coordinates(); for (unsigned int j=0; j!=4; j++) //going through the 4 particles { for (unsigned int k=0; k!=3; k++) //x,y,z pos(j,k) += N(j,i) * coordinates[k]; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_45( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos, BoundedMatrix<double, 45, 3 > & N ) { unsigned int counter=0; for (unsigned int i=0; i!=9;i++) { for (unsigned int j=0; j!=(9-i);j++) { N(counter,0)=0.05+double(i)*0.1; N(counter,1)=0.05+double(j)*0.1; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); counter++; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos, BoundedMatrix<double, 15, 3 > & N ) //2D { unsigned int counter=0; for (unsigned int i=0; i!=5;i++) { for (unsigned int j=0; j!=(5-i);j++) { N(counter,0)=0.05+double(i)*0.2; N(counter,1)=0.05+double(j)*0.2; N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z(); counter++; } } } /// Compute the Gauss points /** */ void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos, BoundedMatrix<double, 20, 4 > & N ) //3D { double fraction_increment; unsigned int counter=0; for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. 
since it is a triangle, it means it will have 10 particles { for (unsigned int j=0; j!=(4-i);j++) { for (unsigned int k=0; k!=(4-i-j);k++) { N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1) //total = 1.0 - N(counter,0); fraction_increment = 0.27; // N(counter,1)=fraction_increment * (0.175 + double(j)); N(counter,2)=fraction_increment * (0.175 + double(k)); N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ; pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X(); pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y(); pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z(); counter++; } } } } /// check function int Check() { KRATOS_TRY NodeType& rnode = *mrModelPart.NodesBegin(); KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mVectorVar1), rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mScalarVar1), rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_VECTOR, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_SCALAR, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_VECTOR, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_SCALAR, rnode) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(INTEGRATION_WEIGHT, rnode) return 0; KRATOS_CATCH("") } /// Member variables ModelPart& mrModelPart; int mNParticles; int mNElems; int mOffset; int mMaxSubSteps; double mMaxSubStepDt; int mMaxNumberOfParticles; std::vector< ShallowParticle > mParticlesVector; bool mOddTimeStep; bool mParticlePrintingToolInitialized; unsigned int mLastNodeId; DenseVector<int> mNumOfParticlesInElems; DenseVector<int> mNumOfParticlesInElemsAux; DenseVector<ParticlePointerVector> mVectorOfParticlePointersVectors; typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; const Variable<double>* mScalarVar1; const Variable<array_1d<double,3>>* mVectorVar1; std::string m_scalar_var1_name; std::string m_vector_var1_name; }; // class MoveShallowWaterParticleUtility } // namespace Kratos. #endif // KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED defined
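For orientation, the doc-comments above imply the following calling sequence per time step. This is only a hypothetical driver sketch: the constructor, its arguments and the template argument are not part of this excerpt, and model_part, parameters and min_particles are assumed names.

// Hypothetical usage sketch (assumed constructor and names):
MoveShallowWaterParticleUtility<2> utility(model_part, parameters);
// Per time step (ordering taken from the doc-comments above):
utility.MoveParticles();                 // convect particles along VELOCITY
utility.PreReseed(min_particles);        // minimal reseed before projecting
utility.TransferLagrangianToEulerian();  // particles -> PROJECTED_* nodal data
// ... solve the eulerian (mesh) stage here ...
utility.CalculateDeltaVariables();       // DELTA_* = solved - PROJECTED_*
utility.CorrectParticlesWithoutMovingUsingDeltaVariables();
utility.PostReseed(min_particles);       // full reseed from the updated mesh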
GB_unaryop__minv_int32_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int32_uint16 // op(A') function: GB_tran__minv_int32_uint16 // C type: int32_t // A type: uint16_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 32) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 32) ; // casting #define GB_CASTING(z, aij) \ int32_t z = (int32_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int32_uint16 ( int32_t *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int32_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
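For readability, this is the hand-expanded form of the GB_CAST_OP (p, p) loop body used in GB_unop__minv_int32_uint16 above; it is a direct expansion of the macros defined in this file (GB_IMINV_SIGNED itself is defined elsewhere in the library and is not reproduced here):

for (int64_t p = 0 ; p < anz ; p++)
{
    uint16_t aij = Ax [p] ;             /* GB_GETA: load the uint16_t input   */
    int32_t  z = (int32_t) aij ;        /* GB_CASTING: cast uint16_t -> int32_t */
    Cx [p] = GB_IMINV_SIGNED (z, 32) ;  /* GB_OP: signed integer minv          */
}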
GB_unaryop__one_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_uint32_uint32 // op(A') function: GB_tran__one_uint32_uint32 // C type: uint32_t // A type: uint32_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_uint32_uint32 ( uint32_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_uint32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
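Note that for the ONE operator both GB_GETA and GB_CASTING expand to empty statements, so the apply loop above degenerates to a constant fill; expanded by hand:

for (int64_t p = 0 ; p < anz ; p++)
{
    Cx [p] = 1 ;   /* the input value Ax [p] is never read */
}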
stencil.c
/* Copyright (c) 2015 The University of Edinburgh. */ /* * This software was developed as part of the * EC FP7 funded project Adept (Project ID: 610490) * www.adept-project.eu */ /* Licensed under the Apache License, Version 2.0 (the "License"); */ /* you may not use this file except in compliance with the License. */ /* You may obtain a copy of the License at */ /* http://www.apache.org/licenses/LICENSE-2.0 */ /* Unless required by applicable law or agreed to in writing, software */ /* distributed under the License is distributed on an "AS IS" BASIS, */ /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ /* See the License for the specific language governing permissions and */ /* limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <omp.h> #include "level1.h" #include "utils.h" #define REPS 100 void float_stencil27(unsigned int size){ int i, j, k, iter; int n = size-2; float fac = 1.0/26; /* Work buffers, with halos */ float *a0 = (float*)malloc(sizeof(float)*size*size*size); float *a1 = (float*)malloc(sizeof(float)*size*size*size); if(a0==NULL||a1==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ printf("27-point Single Precision Stencil Error: Unable to allocate memory\n"); } #pragma omp parallel { if (omp_get_thread_num() == 0){ printf("Running on Host with %d OpenMP thread(s):\n\n",omp_get_num_threads()); } } struct timespec start, end; /* zero all of array (including halos) */ #pragma omp parallel for private(j,k) for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { for (k = 0; k < size; k++) { a0[i*size*size+j*size+k] = 0.0; } } } #pragma omp parallel for private(j,k) /* use random numbers to fill interior */ for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*size*size+j*size+k] = (float) rand()/ (float)(1.0 + RAND_MAX); } } } /* run main computation on host */ clock_gettime(CLOCK, &start); for (iter = 0; iter < REPS; iter++) { #pragma omp parallel { #pragma omp for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*size*size+j*size+k] = ( a0[i*size*size+(j-1)*size+k] + a0[i*size*size+(j+1)*size+k] + a0[(i-1)*size*size+j*size+k] + a0[(i+1)*size*size+j*size+k] + a0[(i-1)*size*size+(j-1)*size+k] + a0[(i-1)*size*size+(j+1)*size+k] + a0[(i+1)*size*size+(j-1)*size+k] + a0[(i+1)*size*size+(j+1)*size+k] + a0[i*size*size+(j-1)*size+(k-1)] + a0[i*size*size+(j+1)*size+(k-1)] + a0[(i-1)*size*size+j*size+(k-1)] + a0[(i+1)*size*size+j*size+(k-1)] + a0[(i-1)*size*size+(j-1)*size+(k-1)] + a0[(i-1)*size*size+(j+1)*size+(k-1)] + a0[(i+1)*size*size+(j-1)*size+(k-1)] + a0[(i+1)*size*size+(j+1)*size+(k-1)] + a0[i*size*size+(j-1)*size+(k+1)] + a0[i*size*size+(j+1)*size+(k+1)] + a0[(i-1)*size*size+j*size+(k+1)] + a0[(i+1)*size*size+j*size+(k+1)] + a0[(i-1)*size*size+(j-1)*size+(k+1)] + a0[(i-1)*size*size+(j+1)*size+(k+1)] + a0[(i+1)*size*size+(j-1)*size+(k+1)] + a0[(i+1)*size*size+(j+1)*size+(k+1)] + a0[i*size*size+j*size+(k-1)] + a0[i*size*size+j*size+(k+1)] ) * fac; } } } #pragma omp for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*size*size+j*size+k] = a1[i*size*size+j*size+k]; } } } } // end omp parallel region } /* end iteration loop */ clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Single Precision Stencil - 27 point"); /* Free malloc'd memory to prevent leaks */ free(a0); 
free(a1); } void double_stencil27(unsigned int size){ int i, j, k, iter; int n = size-2; double fac = 1.0/26; /* Work buffers, with halos */ double *a0 = (double*)malloc(sizeof(double)*size*size*size); double *a1 = (double*)malloc(sizeof(double)*size*size*size); if(a0==NULL||a1==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ printf("27-point Double Precision Stencil Error: Unable to allocate memory\n"); } #pragma omp parallel { if (omp_get_thread_num() == 0){ printf("Running on Host with %d OpenMP thread(s):\n\n",omp_get_num_threads()); } } struct timespec start, end; /* zero all of array (including halos) */ #pragma omp parallel for private(j,k) for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { for (k = 0; k < size; k++) { a0[i*size*size+j*size+k] = 0.0; } } } #pragma omp parallel for private(j,k) /* use random numbers to fill interior */ for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*size*size+j*size+k] = (double) rand()/ (double)(1.0 + RAND_MAX); } } } /* run main computation on host */ clock_gettime(CLOCK, &start); for (iter = 0; iter < REPS; iter++) { #pragma omp parallel { #pragma omp for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*size*size+j*size+k] = ( a0[i*size*size+(j-1)*size+k] + a0[i*size*size+(j+1)*size+k] + a0[(i-1)*size*size+j*size+k] + a0[(i+1)*size*size+j*size+k] + a0[(i-1)*size*size+(j-1)*size+k] + a0[(i-1)*size*size+(j+1)*size+k] + a0[(i+1)*size*size+(j-1)*size+k] + a0[(i+1)*size*size+(j+1)*size+k] + a0[i*size*size+(j-1)*size+(k-1)] + a0[i*size*size+(j+1)*size+(k-1)] + a0[(i-1)*size*size+j*size+(k-1)] + a0[(i+1)*size*size+j*size+(k-1)] + a0[(i-1)*size*size+(j-1)*size+(k-1)] + a0[(i-1)*size*size+(j+1)*size+(k-1)] + a0[(i+1)*size*size+(j-1)*size+(k-1)] + a0[(i+1)*size*size+(j+1)*size+(k-1)] + a0[i*size*size+(j-1)*size+(k+1)] + a0[i*size*size+(j+1)*size+(k+1)] + a0[(i-1)*size*size+j*size+(k+1)] + a0[(i+1)*size*size+j*size+(k+1)] + a0[(i-1)*size*size+(j-1)*size+(k+1)] + a0[(i-1)*size*size+(j+1)*size+(k+1)] + a0[(i+1)*size*size+(j-1)*size+(k+1)] + a0[(i+1)*size*size+(j+1)*size+(k+1)] + a0[i*size*size+j*size+(k-1)] + a0[i*size*size+j*size+(k+1)] ) * fac; } } } #pragma omp for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*size*size+j*size+k] = a1[i*size*size+j*size+k]; } } } } // end omp parallel region } /* end iteration loop */ clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Double Precision Stencil - 27 point"); /* Free malloc'd memory to prevent leaks */ free(a0); free(a1); } void float_stencil19(unsigned int size){ int i, j, k, iter; int n = size-2; float fac = 1.0/18; /* Work buffers, with halos */ float *a0 = (float*)malloc(sizeof(float)*size*size*size); float *a1 = (float*)malloc(sizeof(float)*size*size*size); if(a0==NULL||a1==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ printf("19-point Single Precision Stencil Error: Unable to allocate memory\n"); } #pragma omp parallel { if (omp_get_thread_num() == 0){ printf("Running on Host with %d OpenMP thread(s):\n\n",omp_get_num_threads()); } } struct timespec start,end; /* zero all of array (including halos) */ #pragma omp parallel for private(j,k) for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { for (k = 0; k < size; k++) { a0[i*size*size+j*size+k] = 0.0; } } } /* use random numbers to fill interior */ #pragma omp parallel for private(j,k) for (i = 1; i < n+1; i++) { for (j = 
1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*size*size+j*size+k] = (float) rand()/ (float)(1.0 + RAND_MAX); } } } /* run main computation on host */ clock_gettime(CLOCK, &start); for (iter = 0; iter < REPS; iter++) { #pragma omp parallel { #pragma omp for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*size*size+j*size+k] = ( a0[i*size*size+(j-1)*size+k] + a0[i*size*size+(j+1)*size+k] + a0[(i-1)*size*size+j*size+k] + a0[(i+1)*size*size+j*size+k] + a0[(i-1)*size*size+(j-1)*size+k] + a0[(i-1)*size*size+(j+1)*size+k] + a0[(i+1)*size*size+(j-1)*size+k] + a0[(i+1)*size*size+(j+1)*size+k] + a0[i*size*size+(j-1)*size+(k-1)] + a0[i*size*size+(j+1)*size+(k-1)] + a0[(i-1)*size*size+j*size+(k-1)] + a0[(i+1)*size*size+j*size+(k-1)] + a0[i*size*size+(j-1)*size+(k+1)] + a0[i*size*size+(j+1)*size+(k+1)] + a0[(i-1)*size*size+j*size+(k+1)] + a0[(i+1)*size*size+j*size+(k+1)] + a0[i*size*size+j*size+(k-1)] + a0[i*size*size+j*size+(k+1)] ) * fac; } } } #pragma omp for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*size*size+j*size+k] = a1[i*size*size+j*size+k]; } } } } // end omp parallel region } /* end iteration loop */ clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Single Precision Stencil - 19 point"); /* Free malloc'd memory to prevent leaks */ free(a0); free(a1); } void double_stencil19(unsigned int size){ int i, j, k, iter; int n = size-2; double fac = 1.0/18; /* Work buffers, with halos */ double *a0 = (double*)malloc(sizeof(double)*size*size*size); double *a1 = (double*)malloc(sizeof(double)*size*size*size); if(a0==NULL||a1==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ printf("19-point Double Precision Stencil Error: Unable to allocate memory\n"); } #pragma omp parallel { if (omp_get_thread_num() == 0){ printf("Running on Host with %d OpenMP thread(s):\n\n",omp_get_num_threads()); } } struct timespec start,end; /* zero all of array (including halos) */ #pragma omp parallel for private(j,k) for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { for (k = 0; k < size; k++) { a0[i*size*size+j*size+k] = 0.0; } } } /* use random numbers to fill interior */ #pragma omp parallel for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*size*size+j*size+k] = (double) rand()/ (double)(1.0 + RAND_MAX); } } } /* run main computation on host */ clock_gettime(CLOCK, &start); for (iter = 0; iter < REPS; iter++) { #pragma omp parallel { #pragma omp for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*size*size+j*size+k] = ( a0[i*size*size+(j-1)*size+k] + a0[i*size*size+(j+1)*size+k] + a0[(i-1)*size*size+j*size+k] + a0[(i+1)*size*size+j*size+k] + a0[(i-1)*size*size+(j-1)*size+k] + a0[(i-1)*size*size+(j+1)*size+k] + a0[(i+1)*size*size+(j-1)*size+k] + a0[(i+1)*size*size+(j+1)*size+k] + a0[i*size*size+(j-1)*size+(k-1)] + a0[i*size*size+(j+1)*size+(k-1)] + a0[(i-1)*size*size+j*size+(k-1)] + a0[(i+1)*size*size+j*size+(k-1)] + a0[i*size*size+(j-1)*size+(k+1)] + a0[i*size*size+(j+1)*size+(k+1)] + a0[(i-1)*size*size+j*size+(k+1)] + a0[(i+1)*size*size+j*size+(k+1)] + a0[i*size*size+j*size+(k-1)] + a0[i*size*size+j*size+(k+1)] ) * fac; } } } #pragma omp for private(j,k) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*size*size+j*size+k] = a1[i*size*size+j*size+k]; } } } } // end omp parallel region } /* end iteration loop */ 
clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Double Precision Stencil - 19 point"); /* Free malloc'd memory to prevent leaks */ free(a0); free(a1); } void float_stencil9(unsigned int size){ int i, j, iter; int n = size-2; float fac = 1.0/8; /* Work buffers, with halos */ float *a0 = (float*)malloc(sizeof(float)*size*size); float *a1 = (float*)malloc(sizeof(float)*size*size); if(a0==NULL||a1==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ printf("9-point Single Precision Stencil Error: Unable to allocate memory\n"); } #pragma omp parallel { if (omp_get_thread_num() == 0){ printf("Running on Host with %d OpenMP thread(s):\n\n",omp_get_num_threads()); } } struct timespec start,end; /* zero all of array (including halos) */ #pragma omp parallel for private(j) for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { a0[i*size+j] = 0.0; } } /* use random numbers to fill interior */ #pragma omp parallel for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a0[i*size+j] = (float) rand()/ (float)(1.0 + RAND_MAX); } } /* run main computation on host */ clock_gettime(CLOCK, &start); for (iter = 0; iter < REPS; iter++) { #pragma omp parallel { #pragma omp for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a1[i*size+j] = ( a0[i*size+(j-1)] + a0[i*size+(j+1)] + a0[(i-1)*size+j] + a0[(i+1)*size+j] + a0[(i-1)*size+(j-1)] + a0[(i-1)*size+(j+1)] + a0[(i+1)*size+(j-1)] + a0[(i+1)*size+(j+1)] ) * fac; } } #pragma omp for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a0[i*size+j] = a1[i*size+j]; } } } // end omp parallel for } /* end iteration loop */ clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Single Precision Stencil - 9 point"); /* Free malloc'd memory to prevent leaks */ free(a0); free(a1); } void double_stencil9(unsigned int size){ int i, j, iter; int n = size-2; double fac = 1.0/8; /* Work buffers, with halos */ double *a0 = (double*)malloc(sizeof(double)*size*size); double *a1 = (double*)malloc(sizeof(double)*size*size); if(a0==NULL||a1==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ printf("9-point Double Precision Stencil Error: Unable to allocate memory\n"); } #pragma omp parallel { if (omp_get_thread_num() == 0){ printf("Running on Host with %d OpenMP thread(s):\n\n",omp_get_num_threads()); } } struct timespec start,end; /* zero all of array (including halos) */ #pragma omp parallel for private(j) for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { a0[i*size+j] = 0.0; } } /* use random numbers to fill interior */ #pragma omp parallel for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a0[i*size+j] = (double) rand()/ (double)(1.0 + RAND_MAX); } } /* run main computation on host */ clock_gettime(CLOCK, &start); for (iter = 0; iter < REPS; iter++) { #pragma omp parallel { #pragma omp for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a1[i*size+j] = ( a0[i*size+(j-1)] + a0[i*size+(j+1)] + a0[(i-1)*size+j] + a0[(i+1)*size+j] + a0[(i-1)*size+(j-1)] + a0[(i-1)*size+(j+1)] + a0[(i+1)*size+(j-1)] + a0[(i+1)*size+(j+1)] ) * fac; } } #pragma omp for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a0[i*size+j] = a1[i*size+j]; } } } // end omp parallel for } /* end iteration loop */ clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Double Precision Stencil - 9 point"); /* Free malloc'd memory to prevent leaks */ free(a0); free(a1); } void float_stencil5(unsigned int size){ int i, j, iter; int 
n = size-2; float fac = 1.0/4; /* Work buffers, with halos */ float *a0 = (float*)malloc(sizeof(float)*size*size); float *a1 = (float*)malloc(sizeof(float)*size*size); if(a0==NULL||a1==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ printf("5-point Single Precision Stencil Error: Unable to allocate memory\n"); } #pragma omp parallel { if (omp_get_thread_num() == 0){ printf("Running on Host with %d OpenMP thread(s):\n\n",omp_get_num_threads()); } } struct timespec start,end; /* zero all of array (including halos) */ #pragma omp parallel for private(j) for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { a0[i*size+j] = 0.0; } } /* use random numbers to fill interior */ #pragma omp parallel for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a0[i*size+j] = (float) rand()/ (float)(1.0 + RAND_MAX); } } /* run main computation on host */ clock_gettime(CLOCK, &start); for (iter = 0; iter < REPS; iter++) { #pragma omp parallel { #pragma omp for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a1[i*size+j] = ( a0[i*size+(j-1)] + a0[i*size+(j+1)] + a0[(i-1)*size+j] + a0[(i+1)*size+j] ) * fac; } } #pragma omp for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a0[i*size+j] = a1[i*size+j]; } } } //end omp parallel region } /* end iteration loop */ clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Single Precision Stencil - 5 point"); /* Free malloc'd memory to prevent leaks */ free(a0); free(a1); } void double_stencil5(unsigned int size){ int i, j, iter; int n = size-2; double fac = 1.0/4; /* Work buffers, with halos */ double *a0 = (double*)malloc(sizeof(double)*size*size); double *a1 = (double*)malloc(sizeof(double)*size*size); if(a0==NULL||a1==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ printf("5-point Double Precision Stencil Error: Unable to allocate memory\n"); } #pragma omp parallel { if (omp_get_thread_num() == 0){ printf("Running on Host with %d OpenMP thread(s):\n\n",omp_get_num_threads()); } } struct timespec start,end; /* zero all of array (including halos) */ #pragma omp parallel for private(j) for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { a0[i*size+j] = 0.0; } } /* use random numbers to fill interior */ #pragma omp parallel for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a0[i*size+j] = (double) rand()/ (double)(1.0 + RAND_MAX); } } /* run main computation on host */ clock_gettime(CLOCK, &start); for (iter = 0; iter < REPS; iter++) { #pragma omp parallel { #pragma omp for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a1[i*size+j] = ( a0[i*size+(j-1)] + a0[i*size+(j+1)] + a0[(i-1)*size+j] + a0[(i+1)*size+j] ) * fac; } } #pragma omp for private(j) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { a0[i*size+j] = a1[i*size+j]; } } } //end omp parallel region } /* end iteration loop */ clock_gettime(CLOCK, &end); elapsed_time_hr(start, end, "Double Precision Stencil - 5 point"); /* Free malloc'd memory to prevent leaks */ free(a0); free(a1); }
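/* Usage sketch (not part of the original benchmark source): a minimal driver
   showing how these host kernels might be invoked. It assumes the prototypes
   are exposed through level1.h and that `size` includes the two halo layers,
   so e.g. an interior of 128 points per dimension needs size = 130. The
   STENCIL_DRIVER_SKETCH guard is ours, added so the sketch can never collide
   with the project's real harness. */
#ifdef STENCIL_DRIVER_SKETCH
#include "level1.h"
int main(void){
  unsigned int size = 130;   /* 128 interior points per dimension plus halo */
  float_stencil27(size);     /* 3D, 26 neighbours */
  double_stencil27(size);
  float_stencil19(size);     /* 3D, 18 neighbours */
  double_stencil19(size);
  float_stencil9(size);      /* 2D, 8 neighbours */
  double_stencil9(size);
  float_stencil5(size);      /* 2D, 4 neighbours */
  double_stencil5(size);
  return 0;
}
#endif /* STENCIL_DRIVER_SKETCH */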
vecops.c
/* MIT License Copyright (c) 2019 Gleb Goussarov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "vecops.h" #include <string.h> double* vec_cpy(double* source, size_t len) { double* result; size_t i = 0; result = malloc(sizeof(double)*len); for (i = 0;i < len;i++) result[i] = source[i]; return result; } /* conversions */ int32_t* vec_to_veci32(double* input, size_t len){ int32_t* result; size_t i; result = malloc(sizeof(int32_t)*len); for (i = 0;i < len;i++) result[i] = (int32_t)(input[i]); return result; } int64_t* vec_to_veci64(double* input, size_t len){ int64_t* result; size_t i; result = malloc(sizeof(int64_t)*len); for (i = 0;i < len;i++) result[i] = (int64_t)(input[i]); return result; } double* veci32_to_vec(int32_t* input, size_t len){ double* result; size_t i; result = malloc(sizeof(double)*len); for (i = 0;i < len;i++) result[i] = (double)(input[i]); return result; } double* veci64_to_vec(int64_t* input, size_t len){ double* result; size_t i; result = malloc(sizeof(double)*len); for (i = 0;i < len;i++) result[i] = (double)(input[i]); return result; } int32_t* veci64_to_veci32(int64_t* input, size_t len){ int32_t* result; size_t i; result = malloc(sizeof(int32_t)*len); for (i = 0;i < len;i++) result[i] = (int32_t)(input[i]); return result; } int64_t* veci32_to_veci64(int32_t* input, size_t len){ int64_t* result; size_t i; result = malloc(sizeof(int64_t)*len); for (i = 0;i < len;i++) result[i] = (int64_t)(input[i]); return result; } size_t vec_maxid(double* source, size_t len) { size_t i; size_t id; double value = 0.0; id = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) { if (value < source[i]) { value = source[i]; id = i; } } return id; } size_t vec_minid(double* source, size_t len) { size_t i; size_t id; double value = 0.0; id = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) { if (value > source[i]) { value = source[i]; id = i; } } return id; } size_t vec_quantileid(double* source, size_t len, size_t targid) { /* uses a variation of quick-sort, where only the relevant portion is sorted */ size_t* neworder; size_t* oldorder; size_t* tmpptr; size_t result; double pivot_val; double minval, maxval; size_t i, i0, imax, nextleft, nextright; int sorted; if (targid >= len) return vec_maxid(source, len); sorted = 1; for (i = 1;i < len;i++) { if (source[i] < source[i - 1]) { sorted = 0; break; } } if (sorted) return targid; oldorder = malloc(sizeof(size_t)*len); neworder = malloc(sizeof(size_t)*len); minval = source[0]; maxval = source[0]; oldorder[0] = 0; sorted = 0; for (i = 1;i < len;i++) { if (minval 
> source[i]) minval = source[i]; if (maxval < source[i]) maxval = source[i]; oldorder[i] = i; } pivot_val = minval + (maxval - minval)*((double)(targid) / (double)(len)); i0 = 0; imax = len; /* iterate until either the leftmost or the rightmost element matches the target quantile */ while (imax > targid + 1 && i0 < targid && !sorted) { nextleft = i0; nextright = imax - 1; /* At the end of the following loop, all elements smaller than pivot_val are on the left of nextleft */ sorted = 1; for (i = i0;i < imax;i++) { if (sorted && i > i0 && source[oldorder[i]] > source[oldorder[i - 1]]) sorted = 0; if (source[oldorder[i]] < pivot_val) { neworder[nextleft] = oldorder[i]; nextleft++; } else { neworder[nextright] = oldorder[i]; nextright--; } } if (nextleft < targid) i0 = nextleft+1; else if (nextleft == targid) i0 = nextleft; else imax = nextleft; tmpptr = oldorder; oldorder = neworder; neworder = tmpptr; pivot_val = source[oldorder[i0]]; } if (sorted) result = oldorder[targid]; else { if (i0 == targid) { /* leftmost element matches quantile - find the smallest element */ result = oldorder[i0]; for (i = i0 + 1;i < imax;i++) { if (source[oldorder[i]] < source[result])result = oldorder[i]; } } else if (imax == targid + 1) { result = oldorder[i0]; /* rightmost element matches quantile - find the biggest element */ for (i = i0 + 1;i < imax;i++) { if (source[oldorder[i]] > source[result])result = oldorder[i]; } } else { /* multiple elements seem to match - take the central one */ result = oldorder[(i0 + imax) / 2]; } } free(neworder); free(oldorder); return result; } size_t vec_medid(double* source, size_t len) { return vec_quantileid(source, len, len / 2); } double vec_max(double * source, size_t len) { size_t i; double value = 0.0; if (len>0) value = source[0]; for (i = 1;i < len;i++) value = value > source[i] ? value : source[i]; return value; } double vec_min(double * source, size_t len) { size_t i; double value = 0.0; if (len>0) value = source[0]; for (i = 1;i < len;i++) value = value < source[i] ? 
value : source[i]; return value; } double vec_med(double* source, size_t len) { return source[vec_medid(source,len)]; } double vec_avg(double* source, size_t len) { size_t i; double value = 0.0; for (i = 0;i < len;i++) value += source[i]; value /= (double)len; return value; } double vec_norm(double* source, size_t len) { size_t i; double value = 0.0; for (i = 0;i < len;i++) value += source[i] * source[i]; value = sqrt(value); return value; } double vec_sum(double* source, size_t len) { size_t i; double value = 0.0; for (i = 0;i < len;i++) value += source[i]; return value; } void vec_zero(double* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = 0.0; } void vec_ones(double* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = 1.0; } void vec_setall(double* target, double val, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = val; } void vec_add(double* target, double * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] += B[i]; } void vec_add_all(double* target, double B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] += B; } void vec_subtract(double* target, double * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] -= B[i]; } void vec_subtract_all(double* target, double B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] -= B; } void vec_scale(double* target, double factor, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] *= factor; } void vec_opposite(double* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = -target[i]; } void vec_inverse(double* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = 1.0 / target[i]; } void vec_dot(double* target, double * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] *= B[i]; } void vec_abs(double* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = target[i] > 0.0 ? 
target[i] : -target[i]; } /* same instructions for i32 vectors, with the exception of inverse, which does not make sense in the context of integers */ int32_t* veci32_cpy(int32_t* source, size_t len) { int32_t* result; size_t i = 0; result = malloc(sizeof(int32_t)*len); for (i = 0;i < len;i++) result[i] = source[i]; return result; } size_t veci32_maxid(int32_t* source, size_t len) { size_t i; size_t id; int32_t value = 0; id = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) { if (value < source[i]) { value = source[i]; id = i; } } return id; } size_t veci32_minid(int32_t* source, size_t len) { size_t i; size_t id; int32_t value = 0; id = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) { if (value > source[i]) { value = source[i]; id = i; } } return id; } size_t veci32_quantileid(int32_t* source, size_t len, size_t targid) { /* uses a variation of quick-sort, where only the relevant portion is sorted */ size_t* neworder; size_t* oldorder; size_t* tmpptr; size_t result; int32_t pivot_val; int32_t minval, maxval; size_t i, i0, imax, nextleft, nextright; int sorted; if (targid >= len) return veci32_maxid(source, len); sorted = 1; for (i = 1;i < len;i++) { if (source[i] < source[i - 1]) { sorted = 0; break; } } if (sorted) return targid; oldorder = malloc(sizeof(size_t)*len); neworder = malloc(sizeof(size_t)*len); minval = source[0]; maxval = source[0]; oldorder[0] = 0; sorted = 0; for (i = 1;i < len;i++) { if (minval > source[i]) minval = source[i]; if (maxval < source[i]) maxval = source[i]; oldorder[i] = i; } pivot_val = minval + (int32_t)((maxval - minval)*((double)(targid) / (double)(len))); i0 = 0; imax = len; /* iterate until either the leftmost or the rightmost element matches the target quantile */ while (imax > targid + 1 && i0 < targid && !sorted) { nextleft = i0; nextright = imax - 1; /* At the end of the following loop, all elements smaller than pivot_val are on the left of nextleft */ sorted = 1; for (i = i0;i < imax;i++) { if (sorted && i > i0 && source[oldorder[i]] > source[oldorder[i - 1]]) sorted = 0; if (source[oldorder[i]] < pivot_val) { neworder[nextleft] = oldorder[i]; nextleft++; } else { neworder[nextright] = oldorder[i]; nextright--; } } if (nextleft < targid) i0 = nextleft + 1; else if (nextleft == targid) i0 = nextleft; else imax = nextleft; tmpptr = oldorder; oldorder = neworder; neworder = tmpptr; pivot_val = source[oldorder[i0]]; } if (sorted) result = oldorder[targid]; else { if (i0 == targid) { /* leftmost element matches quantile - find the smallest element */ result = oldorder[i0]; for (i = i0 + 1;i < imax;i++) { if (source[oldorder[i]] < source[result])result = oldorder[i]; } } else if (imax == targid + 1) { result = oldorder[i0]; /* rightmost element matches quantile - find the biggest element */ for (i = i0 + 1;i < imax;i++) { if (source[oldorder[i]] > source[result])result = oldorder[i]; } } else { /* multiple elements seem to match - take the central one */ result = oldorder[(i0 + imax) / 2]; } } free(neworder); free(oldorder); return result; } size_t veci32_medid(int32_t* source, size_t len) { return veci32_quantileid(source, len, len / 2); } int32_t veci32_max(int32_t * source, size_t len) { size_t i; int32_t value = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) value = value > source[i] ? value : source[i]; return value; } int32_t veci32_min(int32_t * source, size_t len) { size_t i; int32_t value = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) value = value < source[i] ?
value : source[i]; return value; } int32_t veci32_med(int32_t* source, size_t len) { return source[veci32_medid(source, len)]; } int32_t veci32_avg(int32_t* source, size_t len) { size_t i; int32_t value = 0; for (i = 0;i < len;i++) value += source[i]; value /= (int32_t)len; return value; } int32_t veci32_norm(int32_t* source, size_t len) { size_t i; double value = 0; for (i = 0;i < len;i++) value += source[i] * source[i]; value = sqrt(value); return (int32_t)value; } int32_t veci32_sum(int32_t* source, size_t len) { size_t i; int32_t value = 0; for (i = 0;i < len;i++) value += source[i]; return value; } int64_t veci32_sum64(int32_t* source, size_t len) { size_t i; int64_t value = 0; for (i = 0;i < len;i++) value += source[i]; return value; } void veci32_zero(int32_t* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = 0; } void veci32_ones(int32_t* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = 1; } void veci32_setall(int32_t* target, int32_t val, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = val; } void veci32_add(int32_t* target, int32_t * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] += B[i]; } void veci32_add_all(int32_t* target, int32_t B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] += B; } void veci32_subtract(int32_t* target, int32_t * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] -= B[i]; } void veci32_subtract_all(int32_t* target, int32_t B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] -= B; } void veci32_scale(int32_t* target, int32_t factor, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] *= factor; } void veci32_opposite(int32_t* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = -target[i]; } void veci32_dot(int32_t* target, int32_t * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] *= B[i]; } void veci32_abs(int32_t* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = target[i] > 0 ? 
target[i] : -target[i]; } /* same instructions for i64 vectors, with the exception of inverse, which does not make sense in the context of integers */ int64_t* veci64_cpy(int64_t* source, size_t len) { int64_t* result; size_t i = 0; result = malloc(sizeof(int64_t)*len); for (i = 0;i < len;i++) result[i] = source[i]; return result; } size_t veci64_maxid(int64_t* source, size_t len) { size_t i; size_t id; int64_t value = 0; id = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) { if (value < source[i]) { value = source[i]; id = i; } } return id; } size_t veci64_minid(int64_t* source, size_t len) { size_t i; size_t id; int64_t value = 0; id = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) { if (value > source[i]) { value = source[i]; id = i; } } return id; } size_t veci64_quantileid(int64_t* source, size_t len, size_t targid) { /* uses a variation of quick-sort, where only the relevant portion is sorted */ size_t* neworder; size_t* oldorder; size_t* tmpptr; size_t result; int64_t pivot_val; int64_t minval, maxval; size_t i, i0, imax, nextleft, nextright; int sorted; if (targid >= len) return veci64_maxid(source, len); sorted = 1; for (i = 1;i < len;i++) { if (source[i] < source[i - 1]) { sorted = 0; break; } } if (sorted) return targid; oldorder = malloc(sizeof(size_t)*len); neworder = malloc(sizeof(size_t)*len); minval = source[0]; maxval = source[0]; oldorder[0] = 0; sorted = 0; for (i = 1;i < len;i++) { if (minval > source[i]) minval = source[i]; if (maxval < source[i]) maxval = source[i]; oldorder[i] = i; } pivot_val = minval + (int64_t)((maxval - minval)*((double)(targid) / (double)(len))); i0 = 0; imax = len; /* iterate until either the leftmost or the rightmost element matches the target quantile */ while (imax > targid + 1 && i0 < targid && !sorted) { nextleft = i0; nextright = imax - 1; /* At the end of the following loop, all elements smaller than pivot_val are on the left of nextleft */ sorted = 1; for (i = i0;i < imax;i++) { if (sorted && i > i0 && source[oldorder[i]] > source[oldorder[i - 1]]) sorted = 0; if (source[oldorder[i]] < pivot_val) { neworder[nextleft] = oldorder[i]; nextleft++; } else { neworder[nextright] = oldorder[i]; nextright--; } } if (nextleft < targid) i0 = nextleft + 1; else if (nextleft == targid) i0 = nextleft; else imax = nextleft; tmpptr = oldorder; oldorder = neworder; neworder = tmpptr; pivot_val = source[oldorder[i0]]; } if (sorted) result = oldorder[targid]; else { if (i0 == targid) { /* leftmost element matches quantile - find the smallest element */ result = oldorder[i0]; for (i = i0 + 1;i < imax;i++) { if (source[oldorder[i]] < source[result])result = oldorder[i]; } } else if (imax == targid + 1) { result = oldorder[i0]; /* rightmost element matches quantile - find the biggest element */ for (i = i0 + 1;i < imax;i++) { if (source[oldorder[i]] > source[result])result = oldorder[i]; } } else { /* multiple elements seem to match - take the central one */ result = oldorder[(i0 + imax) / 2]; } } free(neworder); free(oldorder); return result; } size_t veci64_medid(int64_t* source, size_t len) { return veci64_quantileid(source, len, len / 2); } int64_t veci64_max(int64_t * source, size_t len) { size_t i; int64_t value = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) value = value > source[i] ? value : source[i]; return value; } int64_t veci64_min(int64_t * source, size_t len) { size_t i; int64_t value = 0; if (len>0) value = source[0]; for (i = 1;i < len;i++) value = value < source[i] ?
value : source[i]; return value; } int64_t veci64_med(int64_t* source, size_t len) { return source[veci64_medid(source, len)]; } int64_t veci64_avg(int64_t* source, size_t len) { size_t i; int64_t value = 0; for (i = 0;i < len;i++) value += source[i]; value /= (int64_t)len; return value; } int64_t veci64_norm(int64_t* source, size_t len) { size_t i; double value = 0.0; for (i = 0;i < len;i++) value += source[i] * source[i]; value = sqrt(value); return (int64_t)value; } int64_t veci64_sum(int64_t* source, size_t len) { size_t i; int64_t value = 0; for (i = 0;i < len;i++) value += source[i]; return value; } void veci64_zero(int64_t* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = 0; } void veci64_ones(int64_t* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = 1; } void veci64_setall(int64_t* target, int64_t val, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = val; } void veci64_add(int64_t* target, int64_t * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] += B[i]; } void veci64_add_all(int64_t* target, int64_t B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] += B; } void veci64_subtract(int64_t* target, int64_t * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] -= B[i]; } void veci64_subtract_all(int64_t* target, int64_t B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] -= B; } void veci64_scale(int64_t* target, int64_t factor, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] *= factor; } void veci64_opposite(int64_t* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = -target[i]; } void veci64_dot(int64_t* target, int64_t * B, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] *= B[i]; } void veci64_abs(int64_t* target, size_t len) { size_t i = 0; for (i = 0;i < len;i++) target[i] = target[i] > 0 ? 
target[i] : -target[i]; } #define arrswapids(arr,index1,index2,tmpvar) tmpvar = arr[index1]; arr[index1] = arr[index2]; arr[index2] = tmpvar static void _vec_quicksort(double* target, size_t len) { size_t pivot; double pivotvalue; double tmp; size_t i; if (len < 2) return; pivot = 1; pivotvalue = target[0]; for (i = 1;i < len; i++) { if (target[i] <= pivotvalue) { arrswapids(target, i, pivot, tmp); pivot++; } } if (pivot == len) { pivot--; arrswapids(target, 0, pivot, tmp); /* doing this prevents an infinite loop */ } _vec_quicksort(target, pivot); _vec_quicksort(target+pivot, len-pivot); } static void _vec_heapify(double* target, size_t len, size_t node) { size_t newroot = node; size_t left, right; double tmp; left = (node << 1) + 1; right = (node << 1) + 2; if (left < len && target[left] > target[newroot]) newroot = left; if (right < len && target[right] > target[newroot]) newroot = right; if (newroot != node) { arrswapids(target, node, newroot, tmp); _vec_heapify(target, len, newroot); } } static void _vec_sort__wdepth(double* target, size_t len, size_t depth, size_t maxdepth) { size_t pivot; double pivotvalue; double tmp; size_t i,j,k; if (len < 16) { /* insertion sort */ for (i = 1;i < len;i++) { j = i; while (j > 0 && target[i] < target[j - 1]) j--; tmp = target[i]; for (k = i;k > j;k--) { target[k] = target[k - 1]; } target[j] = tmp; } } else if(depth<maxdepth){ pivot = 1; pivotvalue = target[0]; for (i = 1;i < len; i++) { if (target[i] <= pivotvalue) { arrswapids(target, i, pivot, tmp); pivot++; } } if (pivot == len) { pivot--; arrswapids(target, 0, pivot, tmp); } _vec_sort__wdepth(target, pivot, depth + 1, maxdepth); _vec_sort__wdepth(target + pivot, len - pivot, depth + 1, maxdepth); } else { /* heap sort; note i is unsigned, so if it ever gets bigger than its starting value, that means it's reached 0 */ for (i = len / 2 - 1; i < len; i--) { _vec_heapify(target, len, i); } for (i = len - 1; i < len; i--) { arrswapids(target, 0, i, tmp); _vec_heapify(target, i, 0); } } } void vec_sort(double* target, size_t len) { _vec_sort__wdepth(target, len, 0, 32); } static void _vec_quicksort_by(void* target, size_t elt_size, double* values, size_t len) { size_t pivot; double pivotvalue; double tmp; char part_c; char* target_c; int sorted; size_t i,j,k,k0,k1; if (len < 2) return; target_c = target; sorted = 1; for (i = 1;i < len; i++) { if (values[i] < values[i - 1]) { sorted = 0; break; } } if (sorted) return; if (len == 2) { if (values[0] > values[1]) { arrswapids(values, 0, 1, tmp); k0 = 0; k1 = elt_size; for (j = 0;j < elt_size;j++) { arrswapids(target_c, k0 + j, k1 + j, part_c); } } return; } pivot = (len + 1) / 2 - 1; pivotvalue = values[pivot]; tmp = 0; if (values[pivot] < values[0]) { arrswapids(values, 0, pivot, tmp); k0 = 0; k1 = pivot*elt_size; for (j = 0;j < elt_size;j++) { arrswapids(target_c, k0 + j, k1 + j, part_c); } } if (values[len - 1] < values[0]) { arrswapids(values, 0, len - 1, tmp); k0 = 0; k1 = (len - 1)*elt_size; for (j = 0;j < elt_size;j++) { arrswapids(target_c, k0 + j, k1 + j, part_c); } } if (values[pivot] < values[len - 1]) { arrswapids(values, pivot, len - 1, tmp); k0 = pivot*elt_size; k1 = (len - 1)*elt_size; for (j = 0;j < elt_size;j++) { arrswapids(target_c, k0 + j, k1 + j, part_c); } } pivotvalue = values[len - 1]; i = 0; j = len - 1; while (values[i] < pivotvalue)i++; while (j>0 && values[j] >= pivotvalue)j--; if (i < j) { while (i < j) { arrswapids(values, i, j, tmp); k0 = i*elt_size; k1 = j*elt_size; for (k = 0;k < elt_size;k++) { arrswapids(target_c, k0 + k, k1 + k, part_c); } i++; if (j>0)j--; while (values[i] < pivotvalue)i++; while (j>0 && values[j] > pivotvalue)j--; } } else if (i == 0) { i++; } pivot = i; _vec_quicksort_by(target,
elt_size, values, pivot); _vec_quicksort_by((void*)(((char*)target)+elt_size*pivot), elt_size, values + pivot, len - pivot); } void vec_sort_by(void* target, size_t elt_size, double* values, size_t numel) { _vec_quicksort_by(target, elt_size, values, numel); } void _vec_quicksorti64(int64_t* target, size_t len) { size_t pivot; int64_t pivotvalue; int64_t tmp; size_t i,j; if (len < 2) return; if (len == 2) { if (target[0] > target[1]) { arrswapids(target, 0, 1, tmp); } return; } pivot = (len+1)/2-1; pivotvalue = target[pivot]; tmp = 0; if (target[pivot] < target[0]) { arrswapids(target, 0, pivot, tmp); } if (target[len - 1] < target[0]) { arrswapids(target, 0, len - 1, tmp); } if (target[pivot] < target[len - 1]) { arrswapids(target, pivot, len - 1, tmp); } pivotvalue = target[len - 1]; i = 0; j = len - 1; while (target[i] < pivotvalue)i++; while (j>0 && target[j] >= pivotvalue)j--; if (i < j) { while (i < j) { arrswapids(target, i, j, tmp); i++; if(j>0)j--; while (target[i] < pivotvalue)i++; while (j>0 && target[j] > pivotvalue)j--; } } pivot = i; _vec_quicksorti64(target, pivot); _vec_quicksorti64(target + pivot, len - pivot); } size_t _vec_quicksorti64_nofollowup(int64_t* target, size_t len) { size_t pivot; int64_t pivotvalue; int64_t tmp; size_t i, j; if (len < 2) return 0; if (len == 2) { if (target[0] > target[1]) { arrswapids(target, 0, 1, tmp); } return 0; } pivot = (len + 1) / 2 - 1; pivotvalue = target[pivot]; tmp = 0; if (target[pivot] < target[0]) { arrswapids(target, 0, pivot, tmp); } if (target[len - 1] < target[0]) { arrswapids(target, 0, len - 1, tmp); } if (target[pivot] < target[len - 1]) { arrswapids(target, pivot, len - 1, tmp); } pivotvalue = target[len - 1]; i = 0; j = len - 1; while (target[i] < pivotvalue)i++; while (j>0 && target[j] >= pivotvalue)j--; if (i < j) { while (i < j) { arrswapids(target, i, j, tmp); i++; if (j>0)j--; while (target[i] < pivotvalue)i++; while (j>0 && target[j] > pivotvalue)j--; } } pivot = i; return pivot; } void _vec_heapifyi64(int64_t* target, size_t len, size_t node) { size_t newroot = node; size_t left, right; int64_t tmp; left = (node << 1) + 1; right = (node << 1) + 2; if (left < len && target[left] > target[newroot]) newroot = left; if (right < len && target[right] > target[newroot]) newroot = right; if (newroot != node) { arrswapids(target, node, newroot, tmp); _vec_heapifyi64(target, len, newroot); } } void _vec_sort__wdepthi64(int64_t* target, size_t len, size_t depth, size_t maxdepth) { size_t pivot; int64_t tmp; size_t i, j, k; if (len < 8) { /* insertion sort */ for (i = 1;i < len;i++) { j = i; while (j > 0 && target[i] < target[j - 1]) j--; tmp = target[i]; for (k = i;k > j;k--) { target[k] = target[k - 1]; } target[j] = tmp; } } else if (depth<maxdepth) { pivot = _vec_quicksorti64_nofollowup(target, len); _vec_sort__wdepthi64(target, pivot, depth+1, maxdepth); _vec_sort__wdepthi64(target + pivot, len - pivot, depth + 1, maxdepth); } else { /* note i is unsigned, so if it ever gets bigger than its starting value, that means it's reached 0 */ for (i = len / 2 - 1; i < len; i--) { _vec_heapifyi64(target, len, i); } for (i = len - 1; i < len; i--) { arrswapids(target, 0, i, tmp); _vec_heapifyi64(target, i, 0); } } } void vec_sorti64(int64_t* target, size_t len) { _vec_sort__wdepthi64(target, len, 0, 32); } void vec_reorder_byi64(void* target, size_t elt_size, int64_t* new_order, size_t numel) { char* tmp; size_t i; tmp = (char*)malloc(elt_size*numel); memcpy(tmp, target, elt_size*numel); for (i = 0;i < numel;i++) { 
memcpy(((char*)target) + elt_size*i, tmp + elt_size*new_order[i], elt_size); } free(tmp); } static inline void _vec_insertsortbyvec(void* target, size_t elt_size, double* values, size_t count) { size_t i, j, insertat; double curvalue; int64_t* neworder; neworder = (int64_t*)malloc(sizeof(int64_t)*count); for (i = 0;i < count;i++) { neworder[i] = (int64_t)i; } for (i = 1;i < count;i++) { insertat = i; curvalue = values[i]; j = i; while (j > 0 && values[j - 1] > curvalue) { neworder[j] = neworder[j - 1]; values[j] = values[j - 1]; j--; } neworder[j] = i; values[j] = curvalue; } vec_reorder_byi64(target, elt_size, neworder, count); free(neworder); } static inline void _vec_insertsortbyveci64(void* target, size_t elt_size, int64_t* values, size_t count) { size_t i, j, insertat; int64_t curvalue; int64_t* neworder; neworder = (int64_t*)malloc(sizeof(int64_t)*count); for (i = 0;i < count;i++) { neworder[i] = (int64_t)i; } for (i = 1;i < count;i++) { insertat = i; curvalue = values[i]; j = i; while (j > 0 && values[j - 1] > curvalue) { neworder[j] = neworder[j - 1]; values[j] = values[j - 1]; j--; } neworder[j] = i; values[j] = curvalue; } vec_reorder_byi64(target, elt_size, neworder, count); free(neworder); } static inline void _vec_insertsortbyi64(void* target, size_t elt_size, int64_t* values, size_t count) { size_t i, j, insertat; int64_t curvalue; int64_t* neworder; neworder = (int64_t*)malloc(sizeof(int64_t)*count); for (i = 0;i < count;i++) { neworder[i] = (int64_t)i; } for (i = 1;i < count;i++) { insertat = i; curvalue = values[i]; j = i; while (j > 0 && values[j - 1] > curvalue) { neworder[j] = neworder[j - 1]; values[j] = values[j - 1]; j--; } neworder[j] = i; values[j] = curvalue; } vec_reorder_byi64(target, elt_size, neworder, count); free(neworder); } static inline size_t _vec_quicksortbyi64_makepivot(void* target, size_t elt_size, int64_t* values, size_t begin, size_t end) { size_t pivot, i, j, k0, k1; int64_t pivotvalue, tmp; char* target_c; char part_c; int sorted; if ((int64_t)end - (int64_t)begin < 2)return end; target_c = target; /* byte view of target, used when swapping elements below */ if ((int64_t)end - (int64_t)begin < 16) { /* short ranges get no special treatment here */ } pivotvalue = values[begin]; sorted = 1; pivot = begin; for (i = begin + 1;i < end; i++) { if (values[i] < values[i - 1]) sorted = 0; if (values[i] <= pivotvalue) { /* Whenever a smaller value is discovered, it takes the place of the pivot. */ /* This ensures that a different pivot is selected each time this function is called.
*/ arrswapids(values, i, pivot, tmp); k0 = i*elt_size; k1 = pivot*elt_size; for (j = 0;j < elt_size;j++) { arrswapids(target_c, k0 + j, k1 + j, part_c); } pivot++; } } if (pivot == begin) pivot++; if (pivot == end) pivot--; if (sorted) pivot = end; /* result: every element with a value <= pivotvalue will be below "pivot" */ /* pivot==begin+1 <=> the first value was the smallest, but the array is not sorted */ /* pivot==end-1 <=> the first element was the biggest */ /* pivot==end <=> the array is sorted */ return pivot; } static void _vec_quicksort_byi64(void* target, size_t elt_size, int64_t* values, size_t len) { size_t pivot; int64_t pivotvalue; int64_t tmp; char part_c; char* target_c; size_t i, j, k0, k1; if (len < 2) return; target_c = target; pivot = 1; pivotvalue = values[0]; for (i = 1;i < len; i++) { if (values[i] <= pivotvalue) { arrswapids(values, i, pivot, tmp); k0 = i*elt_size; k1 = pivot*elt_size; for (j = 0;j < elt_size;j++) { arrswapids(target_c, k0 + j, k1 + j, part_c); } pivot++; } } if (pivot == len) { pivot--; arrswapids(values, 0, pivot, tmp); k1 = pivot*elt_size; for (j = 0;j < elt_size;j++) { arrswapids(target_c, j, k1 + j, part_c); } /* doing this prevents an infinite loop */ } _vec_quicksort_byi64(target, elt_size, values, pivot); _vec_quicksort_byi64((void*)(((char*)target) + elt_size*pivot), elt_size, values + pivot, len - pivot); } void vec_sort_byi64(void* target, size_t elt_size, int64_t* values, size_t numel) { _vec_quicksort_byi64(target, elt_size, values, numel); } /* void vec_quicksort_byi64_parallel(void* target, size_t elt_size, int64_t* values, size_t numel, size_t nthreads) { int* p_activethreads; size_t* nregions; size_t* nalloc_rb; size_t** regionsbuffer; size_t t; *p_activethreads = 1; nregions = (size_t*) calloc(nthreads, sizeof(size_t)); regionsbuffer = (size_t**) malloc(sizeof(size_t*)*nthreads); nalloc_rb = (size_t*) malloc(sizeof(size_t)*nthreads); for (t = 0;t < nthreads;t++) { regionsbuffer[t] = (size_t*) malloc(sizeof(size_t) * 32); nalloc_rb[t] = 16; } nregions[0] = 1; regionsbuffer[0][0] = 0; regionsbuffer[0][1] = numel; #pragma omp parallel { int cthread; int tthread; int stop; int hasnext; size_t begin; size_t end; size_t pivot; size_t cr; size_t startat; size_t ntodelegate; stop = ((*p_activethreads) == 0); cr = 0; while (!stop) { while (nregions[cthread] == 0 && !stop) { stop = ((*p_activethreads) == 0); } if (!stop) { begin = regionsbuffer[cthread][cr * 2]; end = regionsbuffer[cthread][cr * 2 + 1]; pivot = _vec_quicksortbyi64_makepivot(target, elt_size, values, begin, end); if (pivot < end) { /* replace the current region with the region that's on the left of the pivot */ /* and add a new region with everything that's on the right of it *//* if (nalloc_rb[cthread] == nregions[cthread]) { nalloc_rb[cthread] *= 2; regionsbuffer[cthread] = (size_t*)realloc(regionsbuffer[cthread], (sizeof(size_t)*nalloc_rb[cthread]*2)); } regionsbuffer[cthread][nregions[cthread] * 2 + 1] = regionsbuffer[cthread][cr * 2 + 1]; regionsbuffer[cthread][cr * 2 + 1] = pivot; regionsbuffer[cthread][nregions[cthread] * 2] = pivot; #pragma omp critical { if ((*p_activethreads) < nthreads) { /* delegate half of the remaining work to another thread *//* *p_activethreads += 1; for (tthread = 0;tthread < nthreads;tthread++) { if (nregions[tthread] == 0)break; } startat = (nregions[cthread] + 1 - cr) / 2; ntodelegate = nregions[cthread] - startat; while (nalloc_rb[tthread] < ntodelegate) { nalloc_rb[tthread] *= 2; regionsbuffer[tthread] = 
(size_t*)realloc(regionsbuffer[tthread], (sizeof(size_t)*nalloc_rb[tthread] * 2)); } memcpy(regionsbuffer[tthread], regionsbuffer[cthread] + startat, sizeof(size_t)*ntodelegate); nregions[cthread] -= ntodelegate; (*p_activethreads)++; /* the following will trigger the target thread to stop waiting, so everything needs to be properly initialized at this point *//* nregions[tthread] = ntodelegate; } } } else { cr++; if (cr >= nregions[cthread]) { /* we have completed all the work on this segment *//* cr = 0; nregions[cthread] = 0; #pragma omp atomic (*p_activethreads)--; } } } } free(regionsbuffer); } } /**/ uint64_t* vec_toranks(double* vec, size_t len) { /* this may not be the fastest way to get ranks - but it's easily implemented */ uint64_t* order; uint64_t* result; double* tmpvec; uint64_t i; order = malloc(sizeof(uint64_t)*len); result = malloc(sizeof(uint64_t)*len); tmpvec = vec_cpy(vec, len); for (i = 0;i < (uint64_t)len;i++) { order[i] = i; } vec_sort_by((void*)order, sizeof(uint64_t), tmpvec, len); /* sort the copy so the caller's vector is left untouched */ for (i = 0;i < (uint64_t)len;i++) { result[order[i]] = i; } free(tmpvec); free(order); return result; } double vec_variance(double* X, size_t len) { double* tmpvec; double meanvalue; double squaresum; double result; tmpvec = vec_cpy(X, len); meanvalue = vec_avg(tmpvec, len); vec_dot(tmpvec, tmpvec, len); squaresum = vec_sum(tmpvec, len); result = squaresum / ((double)len) - meanvalue*meanvalue; free(tmpvec); return result; } double vec_variance_weighted(double* X, double* W, size_t len) { double* tmpvec; double meanvalue; double squaresum; double result; tmpvec = vec_cpy(X, len); meanvalue = vec_avg(tmpvec, len); vec_dot(tmpvec, tmpvec, len); vec_dot(tmpvec, W, len); squaresum = vec_sum(tmpvec, len); result = squaresum / ((double)len) - meanvalue*meanvalue; free(tmpvec); return result; } double vec_covariance(double* X, double* Y, size_t len) { double* tmpvec; double meanvalueX,meanvalueY; double squaresum; double result; meanvalueX = vec_avg(X, len); meanvalueY = vec_avg(Y, len); tmpvec = vec_cpy(X, len); vec_dot(tmpvec, Y, len); squaresum = vec_sum(tmpvec, len); result = squaresum / ((double)len) - meanvalueX*meanvalueY; free(tmpvec); return result; } double vec_pearsoncorr(double* X, double* Y, size_t len) { double* tmpvec; double meanvalueX, meanvalueY; double smvX, smvY; /* squared mean value of [] */ double denomX, denomY; double squaresum; double numerator; meanvalueX = vec_avg(X, len); meanvalueY = vec_avg(Y, len); smvX = meanvalueX*meanvalueX; smvY = meanvalueY*meanvalueY; tmpvec = vec_cpy(X, len); vec_dot(tmpvec, Y, len); squaresum = vec_sum(tmpvec, len); numerator = squaresum/((double)len) - meanvalueX*meanvalueY; memcpy(tmpvec, X, sizeof(double)*len); vec_dot(tmpvec, tmpvec, len); denomX = vec_sum(tmpvec, len)/((double)len) - smvX; memcpy(tmpvec, Y, sizeof(double)*len); vec_dot(tmpvec, tmpvec, len); denomY = vec_sum(tmpvec, len)/((double)len) - smvY; free(tmpvec); return numerator / sqrt(denomX*denomY); } double vec_spearmancorr(double* X, double* Y, size_t len) { uint64_t* ranksX; uint64_t* ranksY; double* dranksX; double* dranksY; double result; ranksX = vec_toranks(X, len); ranksY = vec_toranks(Y, len); dranksX = veci64_to_vec((int64_t*)ranksX, len); free(ranksX); dranksY = veci64_to_vec((int64_t*)ranksY, len); free(ranksY); result = vec_pearsoncorr(dranksX, dranksY, len); free(dranksX); free(dranksY); return result; } double vec_manhattandist(double* A, double* B, size_t len) { double result; double delta; size_t i; result = 0.0; for (i = 0;i < len;i++) { delta = A[i] - B[i]; if (delta > 0)
result += delta; else result -= delta; } return result; } double vec_euclidiandist(double* A, double* B, size_t len) { double result; double delta; size_t i; result = 0.0; for (i = 0;i < len;i++) { delta = A[i] - B[i]; result += delta * delta; } return sqrt(result); }
GB_dense_ewise3_noaccum_template.c
//------------------------------------------------------------------------------ // GB_dense_ewise3_noaccum_template: C = A+B where all 3 matrices are dense //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB_unused.h" { //-------------------------------------------------------------------------- // get A, B, and C //-------------------------------------------------------------------------- // any matrix may be aliased to any other (C==A, C==B, and/or A==B) GB_ATYPE *Ax = (GB_ATYPE *) A->x ; GB_BTYPE *Bx = (GB_BTYPE *) B->x ; GB_CTYPE *Cx = (GB_CTYPE *) C->x ; const int64_t cnz = GB_NNZ (C) ; ASSERT (GB_is_dense (A)) ; ASSERT (GB_is_dense (B)) ; ASSERT (GB_is_dense (C)) ; int64_t p ; //-------------------------------------------------------------------------- // C = A+B where all 3 matrices are dense //-------------------------------------------------------------------------- #if GB_CTYPE_IS_BTYPE if (C == B) { //---------------------------------------------------------------------- // C = A+C where A and C are dense //---------------------------------------------------------------------- // C and B cannot be aliased if their types differ #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL // C += A via GB_cblas_saxpy or GB_cblas_daxpy GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Ax, Cx, nthreads) ; // C += A #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL // C -= A via GB_cblas_saxpy or GB_cblas_daxpy GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Ax, Cx, nthreads) ; // C -= A #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < cnz ; p++) { GB_GETA (aij, Ax, p) ; // aij = Ax [p] // Cx [p] = aij + Cx [p] GB_BINOP (GB_CX (p), aij, GB_CX (p), 0, 0) ; } #endif } else #endif #if GB_CTYPE_IS_ATYPE if (C == A) { //---------------------------------------------------------------------- // C = C+B where B and C are dense //---------------------------------------------------------------------- #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL // C += B via GB_cblas_saxpy or GB_cblas_daxpy GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ; // C += B #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL // C -= B via GB_cblas_saxpy or GB_cblas_daxpy GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ; // C -= B #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < cnz ; p++) { GB_GETB (bij, Bx, p) ; // bij = Bx [p] GB_BINOP (GB_CX (p), GB_CX (p), bij, 0, 0) ; // Cx [p] += bij } #endif } else #endif { //---------------------------------------------------------------------- // C = A+B where all 3 matrices are dense //---------------------------------------------------------------------- // note that A and B may still be aliased to each other #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL // C = A+B via GB_cblas_saxpy or GB_cblas_daxpy GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ; // C = A GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ; // C += B #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL // C = A-B via GB_cblas_saxpy or GB_cblas_daxpy GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ; // C = A GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ; // C -= B #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < cnz ; p++) { GB_GETA (aij, Ax, p) 
; // aij = Ax [p] GB_GETB (bij, Bx, p) ; // bij = Bx [p] GB_BINOP (GB_CX (p), aij, bij, 0, 0) ; // Cx [p] = aij + bij } #endif } }
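//------------------------------------------------------------------------------
// Expansion sketch (an illustration, not part of the template): once the
// GB_GETA / GB_GETB / GB_BINOP macros are instantiated for a real plus
// operator on double entries, the generic branch above reduces to a plain
// dense elementwise loop:
//
//      #pragma omp parallel for num_threads(nthreads) schedule(static)
//      for (p = 0 ; p < cnz ; p++)
//      {
//          Cx [p] = Ax [p] + Bx [p] ;   // dense C = A+B, one entry per p
//      }
//
// The CBLAS branches express the same update as a memcpy followed by an axpy
// so that an optimized BLAS can be used when the operator is real plus or
// minus.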
nvptx_asm_delayed_diags.c
// RUN: %clang_cc1 -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc // RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized // RUN: %clang_cc1 -verify -DDIAGS -DIMMEDIATE -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized // RUN: %clang_cc1 -verify -DDIAGS -DDELAYED -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized // RUN: %clang_cc1 -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -fopenmp-version=50 -emit-llvm-bc %s -o %t-x86-host.bc // RUN: %clang_cc1 -verify=expected,omp5 -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -fopenmp-version=50 %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized // RUN: %clang_cc1 -verify=expected,omp5 -DDIAGS -DOMP5 -DIMMEDIATE -fopenmp -fopenmp-version=50 -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized // RUN: %clang_cc1 -verify=expected,omp5 -DDIAGS -DOMP5 -DDELAYED -fopenmp -fopenmp-version=50 -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized // REQUIRES: x86-registered-target // REQUIRES: nvptx-registered-target #ifndef DIAGS // expected-no-diagnostics #endif // DIAGS #ifdef OMP5 void bar(int r) { #ifdef IMMEDIATE // omp5-error@+4 {{invalid input constraint 'mx' in asm}} #endif // IMMEDIATE __asm__("PR3908 %[lf] %[xx] %[li] %[r]" : [ r ] "+r"(r) : [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0))); } #ifdef IMMEDIATE #pragma omp declare target to(bar) device_type(nohost) #else #pragma omp declare target to(bar) device_type(host) #endif // IMMEDIATE #endif // OMP5 void foo(int r) { #ifdef IMMEDIATE // expected-error@+4 {{invalid input constraint 'mx' in asm}} #endif // IMMEDIATE __asm__("PR3908 %[lf] %[xx] %[li] %[r]" : [ r ] "+r"(r) : [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0))); } #ifdef IMMEDIATE #pragma omp declare target to(foo) #endif //IMMEDIATE #ifdef IMMEDIATE #pragma omp declare target #endif //IMMEDIATE void t1(int r) { #ifdef DIAGS // expected-error@+4 {{invalid input constraint 'mx' in asm}} #endif // DIAGS __asm__("PR3908 %[lf] %[xx] %[li] %[r]" : [ r ] "+r"(r) : [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0))); } unsigned t2(signed char input) { unsigned output; #ifdef DIAGS // expected-error@+3 {{invalid output constraint '=a' in asm}} #endif // DIAGS __asm__("xyz" : "=a"(output) : "0"(input)); return output; } double t3(double x) { register long double result; #ifdef DIAGS // expected-error@+3 {{invalid output constraint '=t' in asm}} #endif // DIAGS __asm __volatile("frndint" : "=t"(result) : "0"(x)); return result; } unsigned char t4(unsigned char a, unsigned char b) { unsigned int la = a; unsigned int lb = b; unsigned int bigres; unsigned char 
res; #ifdef DIAGS // expected-error@+3 {{invalid output constraint '=la' in asm}} #endif // DIAGS __asm__("0:\n1:\n" : [ bigres ] "=la"(bigres) : [ la ] "0"(la), [ lb ] "c"(lb) : "edx", "cc"); res = bigres; return res; } void t5(void) { #ifdef DIAGS // expected-error@+6 {{unknown register name 'st' in asm}} #endif // DIAGS __asm__ __volatile__( "finit" : : : "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)", "fpsr", "fpcr"); } typedef long long __m256i __attribute__((__vector_size__(32))); void t6(__m256i *p) { #ifdef DIAGS // expected-error@+3 {{unknown register name 'ymm0' in asm}} #endif // DIAGS __asm__ volatile("vmovaps %0, %%ymm0" ::"m"(*(__m256i *)p) : "ymm0"); } #ifdef IMMEDIATE #pragma omp end declare target #endif //IMMEDIATE int main() { #ifdef DELAYED #pragma omp target #endif // DELAYED { #ifdef DELAYED // expected-note@+2 {{called by 'main'}} #endif // DELAYED t1(0); #ifdef DELAYED // expected-note@+2 {{called by 'main'}} #endif // DELAYED t2(0); #ifdef DELAYED // expected-note@+2 {{called by 'main'}} #endif // DELAYED t3(0); #ifdef DELAYED // expected-note@+2 {{called by 'main'}} #endif // DELAYED t4(0, 0); #ifdef DELAYED // expected-note@+2 {{called by 'main'}} #endif // DELAYED t5(); #ifdef DELAYED // expected-note@+2 {{called by 'main'}} #endif // DELAYED t6(0); } return 0; }
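// Summary (descriptive note, not part of the original test): the RUN lines
// exercise three modes. With neither -DIMMEDIATE nor -DDELAYED the functions
// are never emitted for the device, so no diagnostics are expected. With
// -DIMMEDIATE the functions sit in a `declare target` region, so the invalid
// asm constraints ('mx', '=a', '=t', '=la') and unknown registers ('st',
// 'ymm0') are diagnosed immediately for the nvptx target. With -DDELAYED the
// calls are wrapped in `#pragma omp target`, so the same errors are reported
// lazily, with "called by 'main'" notes pointing at each call site.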
tetrahedron_method.c
/* Copyright (C) 2014 Atsushi Togo */ /* All rights reserved. */ /* This file was originally part of spglib and is part of kspclib. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ /* tetrahedron_method.c */ /* Copyright (C) 2014 Atsushi Togo */ #include "tetrahedron_method.h" #include "kgrid.h" #ifdef THMWARNING #include <stdio.h> #define warning_print(...) fprintf(stderr,__VA_ARGS__) #else #define warning_print(...) 
#endif /* 6-------7 */ /* /| /| */ /* / | / | */ /* 4-------5 | */ /* | 2----|--3 */ /* | / | / */ /* |/ |/ */ /* 0-------1 */ /* */ /* i: vec neighbours */ /* 0: O 1, 2, 4 */ /* 1: a 0, 3, 5 */ /* 2: b 0, 3, 6 */ /* 3: a + b 1, 2, 7 */ /* 4: c 0, 5, 6 */ /* 5: c + a 1, 4, 7 */ /* 6: c + b 2, 4, 7 */ /* 7: c + a + b 3, 5, 6 */ static int main_diagonals[4][3] = {{ 1, 1, 1}, /* 0-7 */ {-1, 1, 1}, /* 1-6 */ { 1,-1, 1}, /* 2-5 */ { 1, 1,-1}}; /* 3-4 */ static int db_relative_grid_address[4][24][4][3] = { { { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 1, 1, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 1, 0, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, { 1, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, { 0, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, {-1, -1, -1}, { 0, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, 0, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, {-1, 1, 0}, {-1, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 0, 1}, {-1, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, {-1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 1, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, 0, 1}, {-1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, 1, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, 1}, { 1, -1, 0}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 0, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 1, 0, -1}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, -1, -1}, { 1, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, -1, -1}, { 1, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, -1, -1}, { 0, 0, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, -1, -1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, -1}, { 1, -1, 0}, }, { { 0, 0, 0}, { 0, -1, -1}, { 0, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, -1, -1}, { 0, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 0, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, -1, 1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 0, -1, 1}, { 1, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, 1}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 
-1, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, -1, 1}, { 1, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, -1, 1}, { 0, 0, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, -1, 1}, { 0, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, -1, 1}, { 0, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, 0, -1}, {-1, 1, -1}, }, { { 0, 0, 0}, {-1, 0, -1}, {-1, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, 1, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 1, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, {-1, 1, 0}, {-1, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, {-1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, -1}, { 1, -1, 0}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, { 0, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, { 0, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, 0, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 1, 0, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, { 1, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, -1, 0}, {-1, 0, 0}, }, }, }; static void get_integration_weight_at_omegas(double *integration_weights, const int num_omegas, const double *omegas, THMCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, const double, const double[4])); static double get_integration_weight(const double omega, THMCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, const double, const double[4])); static int get_main_diagonal(THMCONST double rec_lattice[3][3]); static int sort_omegas(double v[4]); static double norm_squared_d3(const double a[3]); static void multiply_matrix_vector_di3(double v[3], THMCONST double a[3][3], const int b[3]); static double _f(const int n, const int m, const double omega, const double vertices_omegas[4]); static double _J(const int i, const int ci, const double omega, const double vertices_omegas[4]); static double _I(const int i, const int ci, const double omega, const double vertices_omegas[4]); static double _n(const int i, const double omega, const double vertices_omegas[4]); static double _g(const int i, const double omega, const double vertices_omegas[4]); static double _n_0(void); static double _n_1(const double omega, const double vertices_omegas[4]); static double 
_n_2(const double omega, const double vertices_omegas[4]); static double _n_3(const double omega, const double vertices_omegas[4]); static double _n_4(void); static double _g_0(void); static double _g_1(const double omega, const double vertices_omegas[4]); static double _g_2(const double omega, const double vertices_omegas[4]); static double _g_3(const double omega, const double vertices_omegas[4]); static double _g_4(void); static double _J_0(void); static double _J_10(const double omega, const double vertices_omegas[4]); static double _J_11(const double omega, const double vertices_omegas[4]); static double _J_12(const double omega, const double vertices_omegas[4]); static double _J_13(const double omega, const double vertices_omegas[4]); static double _J_20(const double omega, const double vertices_omegas[4]); static double _J_21(const double omega, const double vertices_omegas[4]); static double _J_22(const double omega, const double vertices_omegas[4]); static double _J_23(const double omega, const double vertices_omegas[4]); static double _J_30(const double omega, const double vertices_omegas[4]); static double _J_31(const double omega, const double vertices_omegas[4]); static double _J_32(const double omega, const double vertices_omegas[4]); static double _J_33(const double omega, const double vertices_omegas[4]); static double _J_4(void); static double _I_0(void); static double _I_10(const double omega, const double vertices_omegas[4]); static double _I_11(const double omega, const double vertices_omegas[4]); static double _I_12(const double omega, const double vertices_omegas[4]); static double _I_13(const double omega, const double vertices_omegas[4]); static double _I_20(const double omega, const double vertices_omegas[4]); static double _I_21(const double omega, const double vertices_omegas[4]); static double _I_22(const double omega, const double vertices_omegas[4]); static double _I_23(const double omega, const double vertices_omegas[4]); static double _I_30(const double omega, const double vertices_omegas[4]); static double _I_31(const double omega, const double vertices_omegas[4]); static double _I_32(const double omega, const double vertices_omegas[4]); static double _I_33(const double omega, const double vertices_omegas[4]); static double _I_4(void); void thm_get_relative_grid_address(int relative_grid_address[24][4][3], THMCONST double rec_lattice[3][3]) { int i, j, k, main_diag_index; main_diag_index = get_main_diagonal(rec_lattice); for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 3; k++) { relative_grid_address[i][j][k] = db_relative_grid_address[main_diag_index][i][j][k]; } } } } void thm_get_all_relative_grid_address(int relative_grid_address[4][24][4][3]) { int i, j, k, main_diag_index; for (main_diag_index = 0; main_diag_index < 4; main_diag_index++) { for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 3; k++) { relative_grid_address[main_diag_index][i][j][k] = db_relative_grid_address[main_diag_index][i][j][k]; } } } } } double thm_get_integration_weight(const double omega, THMCONST double tetrahedra_omegas[24][4], const char function) { if (function == 'I') { return get_integration_weight(omega, tetrahedra_omegas, _g, _I); } else { return get_integration_weight(omega, tetrahedra_omegas, _n, _J); } } void thm_get_integration_weight_at_omegas(double *integration_weights, const int num_omegas, const double *omegas, THMCONST double tetrahedra_omegas[24][4], const char function) { if (function == 'I') { 
get_integration_weight_at_omegas(integration_weights, num_omegas, omegas, tetrahedra_omegas, _g, _I); } else { get_integration_weight_at_omegas(integration_weights, num_omegas, omegas, tetrahedra_omegas, _n, _J); } } void thm_get_neighboring_grid_points(int neighboring_grid_points[], const int grid_point, THMCONST int relative_grid_address[][3], const int num_relative_grid_address, const int mesh[3], THMCONST int bz_grid_address[][3], const int bz_map[]) { int bzmesh[3], address_double[3], bz_address_double[3]; int i, j, bz_gp; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } for (i = 0; i < num_relative_grid_address; i++) { for (j = 0; j < 3; j++) { address_double[j] = (bz_grid_address[grid_point][j] + relative_grid_address[i][j]) * 2; bz_address_double[j] = address_double[j]; } bz_gp = bz_map[kgd_get_grid_point_double_mesh(bz_address_double, bzmesh)]; if (bz_gp == -1) { neighboring_grid_points[i] = kgd_get_grid_point_double_mesh(address_double, mesh); } else { neighboring_grid_points[i] = bz_gp; } } } static void get_integration_weight_at_omegas(double *integration_weights, const int num_omegas, const double *omegas, THMCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, const double, const double[4])) { int i; //#pragma omp parallel for for (i = 0; i < num_omegas; i++) { integration_weights[i] = get_integration_weight(omegas[i], tetrahedra_omegas, gn, IJ); } } static double get_integration_weight(const double omega, THMCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, const double, const double[4])) { int i, j, ci; double sum; double v[4]; sum = 0; for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { v[j] = tetrahedra_omegas[i][j]; } ci = sort_omegas(v); if (omega < v[0]) { sum += IJ(0, ci, omega, v) * gn(0, omega, v); } else { if (v[0] < omega && omega < v[1]) { sum += IJ(1, ci, omega, v) * gn(1, omega, v); } else { if (v[1] < omega && omega < v[2]) { sum += IJ(2, ci, omega, v) * gn(2, omega, v); } else { if (v[2] < omega && omega < v[3]) { sum += IJ(3, ci, omega, v) * gn(3, omega, v); } else { if (v[3] < omega) { sum += IJ(4, ci, omega, v) * gn(4, omega, v); } } } } } } return sum / 6; } static int sort_omegas(double v[4]) { int i; double w[4]; i = 0; if (v[0] > v[1]) { w[0] = v[1]; w[1] = v[0]; i = 1; } else { w[0] = v[0]; w[1] = v[1]; } if (v[2] > v[3]) { w[2] = v[3]; w[3] = v[2]; } else { w[2] = v[2]; w[3] = v[3]; } if (w[0] > w[2]) { v[0] = w[2]; v[1] = w[0]; if (i == 0) { i = 4; } } else { v[0] = w[0]; v[1] = w[2]; } if (w[1] > w[3]) { v[3] = w[1]; v[2] = w[3]; if (i == 1) { i = 3; } } else { v[3] = w[3]; v[2] = w[1]; if (i == 1) { i = 5; } } if (v[1] > v[2]) { w[1] = v[1]; v[1] = v[2]; v[2] = w[1]; if (i == 4) { i = 2; } if (i == 5) { i = 1; } } else { if (i == 4) { i = 1; } if (i == 5) { i = 2; } } return i; } static int get_main_diagonal(THMCONST double rec_lattice[3][3]) { int i, shortest; double length, min_length; double main_diag[3]; shortest = 0; multiply_matrix_vector_di3(main_diag, rec_lattice, main_diagonals[0]); min_length = norm_squared_d3(main_diag); for (i = 1; i < 4; i++) { multiply_matrix_vector_di3(main_diag, rec_lattice, main_diagonals[i]); length = norm_squared_d3(main_diag); if (min_length > length) { min_length = length; shortest = i; } } return shortest; } static double norm_squared_d3(const double a[3]) { return a[0] * a[0] + a[1] * a[1] + a[2] * a[2]; } static void 
multiply_matrix_vector_di3(double v[3], THMCONST double a[3][3], const int b[3]) { int i; double c[3]; for (i = 0; i < 3; i++) { c[i] = a[i][0] * b[0] + a[i][1] * b[1] + a[i][2] * b[2]; } for (i = 0; i < 3; i++) { v[i] = c[i]; } } static double _f(const int n, const int m, const double omega, const double vertices_omegas[4]) { return ((omega - vertices_omegas[m]) / (vertices_omegas[n] - vertices_omegas[m])); } static double _J(const int i, const int ci, const double omega, const double vertices_omegas[4]) { switch (i) { case 0: return _J_0(); case 1: switch (ci) { case 0: return _J_10(omega, vertices_omegas); case 1: return _J_11(omega, vertices_omegas); case 2: return _J_12(omega, vertices_omegas); case 3: return _J_13(omega, vertices_omegas); } case 2: switch (ci) { case 0: return _J_20(omega, vertices_omegas); case 1: return _J_21(omega, vertices_omegas); case 2: return _J_22(omega, vertices_omegas); case 3: return _J_23(omega, vertices_omegas); } case 3: switch (ci) { case 0: return _J_30(omega, vertices_omegas); case 1: return _J_31(omega, vertices_omegas); case 2: return _J_32(omega, vertices_omegas); case 3: return _J_33(omega, vertices_omegas); } case 4: return _J_4(); } warning_print("******* Warning *******\n"); warning_print(" Something went wrong in J. \n"); warning_print("******* Warning *******\n"); warning_print("(line %d, %s).\n", __LINE__, __FILE__); return 0; } static double _I(const int i, const int ci, const double omega, const double vertices_omegas[4]) { switch (i) { case 0: return _I_0(); case 1: switch (ci) { case 0: return _I_10(omega, vertices_omegas); case 1: return _I_11(omega, vertices_omegas); case 2: return _I_12(omega, vertices_omegas); case 3: return _I_13(omega, vertices_omegas); } case 2: switch (ci) { case 0: return _I_20(omega, vertices_omegas); case 1: return _I_21(omega, vertices_omegas); case 2: return _I_22(omega, vertices_omegas); case 3: return _I_23(omega, vertices_omegas); } case 3: switch (ci) { case 0: return _I_30(omega, vertices_omegas); case 1: return _I_31(omega, vertices_omegas); case 2: return _I_32(omega, vertices_omegas); case 3: return _I_33(omega, vertices_omegas); } case 4: return _I_4(); } warning_print("******* Warning *******\n"); warning_print(" Something went wrong in I. \n"); warning_print("******* Warning *******\n"); warning_print("(line %d, %s).\n", __LINE__, __FILE__); return 0; } static double _n(const int i, const double omega, const double vertices_omegas[4]) { switch (i) { case 0: return _n_0(); case 1: return _n_1(omega, vertices_omegas); case 2: return _n_2(omega, vertices_omegas); case 3: return _n_3(omega, vertices_omegas); case 4: return _n_4(); } warning_print("******* Warning *******\n"); warning_print(" Something went wrong in n. \n"); warning_print("******* Warning *******\n"); warning_print("(line %d, %s).\n", __LINE__, __FILE__); return 0; } static double _g(const int i, const double omega, const double vertices_omegas[4]) { switch (i) { case 0: return _g_0(); case 1: return _g_1(omega, vertices_omegas); case 2: return _g_2(omega, vertices_omegas); case 3: return _g_3(omega, vertices_omegas); case 4: return _g_4(); } warning_print("******* Warning *******\n"); warning_print(" Something went wrong in g. \n"); warning_print("******* Warning *******\n"); warning_print("(line %d, %s).\n", __LINE__, __FILE__); return 0; } /* omega < omega1 */ static double _n_0(void) { return 0.0; } /* omega1 < omega < omega2 */ static double _n_1(const double omega, const double vertices_omegas[4]) { return (_f(1, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(3, 0, omega, vertices_omegas)); } /* omega2 < omega < omega3 */ static double _n_2(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas)); } /* omega3 < omega < omega4 */ static double _n_3(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)); } /* omega4 < omega */ static double _n_4(void) { return 1.0; } /* omega < omega1 */ static double _g_0(void) { return 0.0; } /* omega1 < omega < omega2 */ static double _g_1(const double omega, const double vertices_omegas[4]) { return (3 * _f(1, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) / (vertices_omegas[3] - vertices_omegas[0])); } /* omega2 < omega < omega3 */ static double _g_2(const double omega, const double vertices_omegas[4]) { return (3 / (vertices_omegas[3] - vertices_omegas[0]) * (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))); } /* omega3 < omega < omega4 */ static double _g_3(const double omega, const double vertices_omegas[4]) { return (3 * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) / (vertices_omegas[3] - vertices_omegas[0])); } /* omega4 < omega */ static double _g_4(void) { return 0.0; } static double _J_0(void) { return 0.0; } static double _J_10(const double omega, const double vertices_omegas[4]) { return (1.0 + _f(0, 1, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas) + _f(0, 3, omega, vertices_omegas)) / 4; } static double _J_11(const double omega, const double vertices_omegas[4]) { return _f(1, 0, omega, vertices_omegas) / 4; } static double _J_12(const double omega, const double vertices_omegas[4]) { return _f(2, 0, omega, vertices_omegas) / 4; } static double _J_13(const double omega, const double vertices_omegas[4]) { return _f(3, 0, omega, vertices_omegas) / 4; } static double _J_20(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (1.0 + _f(0, 3, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * (1.0 + _f(0, 3, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas))) / 4 / _n_2(omega, vertices_omegas); } static double _J_21(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (1.0 + _f(1, 3, omega, vertices_omegas) + _f(1, 2, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (_f(1, 3, omega, vertices_omegas) + _f(1, 2, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(2, 0,
omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas)) / 4 / _n_2(omega, vertices_omegas); } static double _J_22(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * (_f(2, 1, omega, vertices_omegas) + _f(2, 0, omega, vertices_omegas))) / 4 / _n_2(omega, vertices_omegas); } static double _J_23(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(3, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (_f(3, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * _f(3, 0, omega, vertices_omegas)) / 4 / _n_2(omega, vertices_omegas); } static double _J_30(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_31(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_32(const double omega, const double vertices_omegas[4]) { return (1.0 + _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_33(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) * (1.0 + _f(3, 0, omega, vertices_omegas) + _f(3, 1, omega, vertices_omegas) + _f(3, 2, omega, vertices_omegas))) / 4 / _n_3(omega, vertices_omegas); } static double _J_4(void) { return 0.25; } static double _I_0(void) { return 0.0; } static double _I_10(const double omega, const double vertices_omegas[4]) { return (_f(0, 1, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas) + _f(0, 3, omega, vertices_omegas)) / 3; } static double _I_11(const double omega, const double vertices_omegas[4]) { return _f(1, 0, omega, vertices_omegas) / 3; } static double _I_12(const double omega, const double vertices_omegas[4]) { return _f(2, 0, omega, vertices_omegas) / 3; } static double _I_13(const double omega, const double vertices_omegas[4]) { return _f(3, 0, omega, vertices_omegas) / 3; } static double _I_20(const double omega, const double vertices_omegas[4]) { return (_f(0, 3, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) / (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))) / 3; } static double _I_21(const double omega, const double vertices_omegas[4]) { return (_f(1, 2, omega, vertices_omegas) + _f(1, 3, omega, vertices_omegas) * _f(1, 3, omega, 
vertices_omegas) * _f(2, 1, omega, vertices_omegas) / (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))) / 3; } static double _I_22(const double omega, const double vertices_omegas[4]) { return (_f(2, 1, omega, vertices_omegas) + _f(2, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) / (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))) / 3; } static double _I_23(const double omega, const double vertices_omegas[4]) { return (_f(3, 0, omega, vertices_omegas) + _f(3, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) / (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))) / 3; } static double _I_30(const double omega, const double vertices_omegas[4]) { return _f(0, 3, omega, vertices_omegas) / 3; } static double _I_31(const double omega, const double vertices_omegas[4]) { return _f(1, 3, omega, vertices_omegas) / 3; } static double _I_32(const double omega, const double vertices_omegas[4]) { return _f(2, 3, omega, vertices_omegas) / 3; } static double _I_33(const double omega, const double vertices_omegas[4]) { return (_f(3, 0, omega, vertices_omegas) + _f(3, 1, omega, vertices_omegas) + _f(3, 2, omega, vertices_omegas)) / 3; } static double _I_4(void) { return 0.0; }
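A minimal usage sketch for the tetrahedron-method API above, assuming the caller supplies a frequency_at callback (hypothetical; kspclib leaves this gathering step to the application) that returns the band frequency at a neighbouring grid point given its relative address. The 24 tetrahedra attached to a grid point are selected from db_relative_grid_address according to the shortest main diagonal of the reciprocal lattice, their 24x4 vertex frequencies are gathered, and 'I' ('J') selects the delta-function (step-function) integration weight.

/* Illustrative sketch, not part of kspclib. */
#include "tetrahedron_method.h"

double
integration_weight_at_point(THMCONST double rec_lattice[3][3],
                            const double omega,
                            double (*frequency_at)(const int rel_address[3]),
                            const char function) /* 'I' or 'J' */
{
  int t, v;
  int relative_grid_address[24][4][3];
  double tetrahedra_omegas[24][4];

  /* Choose the tetrahedra orientation from the shortest main diagonal. */
  thm_get_relative_grid_address(relative_grid_address, rec_lattice);

  /* Gather the frequency at each of the 4 vertices of all 24 tetrahedra. */
  for (t = 0; t < 24; t++) {
    for (v = 0; v < 4; v++) {
      tetrahedra_omegas[t][v] = frequency_at(relative_grid_address[t][v]);
    }
  }

  /* 'I' weights a delta function (DOS-like); 'J' a step function (NOS-like). */
  return thm_get_integration_weight(omega, tetrahedra_omegas, function);
}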
kernel_cpu.balance.c
#include "hclib.h" int ____num_tasks[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; // #ifdef __cplusplus // extern "C" { // #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 #include <omp.h> // (in directory known to compiler) needed by openmp #include <stdlib.h> // (in directory known to compiler) needed by malloc #include <stdio.h> // (in directory known to compiler) needed by printf, stderr //======================================================================================================================================================150 // COMMON //======================================================================================================================================================150 #include "common.h" // (in directory provided here) //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "timer.h" // (in directory provided here) //========================================================================================================================================================================================================200 // KERNEL_CPU FUNCTION //========================================================================================================================================================================================================200 void kernel_cpu( int cores_arg, record *records, knode *knodes, long knodes_elem, int order, long maxheight, int count, long *currKnode, long *offset, int *keys, record *ans) { //======================================================================================================================================================150 // Variables //======================================================================================================================================================150 // timer long long time0; long long time1; long long time2; time0 = get_time(); //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 int threadsPerBlock; threadsPerBlock = order < 1024 ? 
order : 1024; { time1 = get_time(); //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 // private thread IDs int thid; int bid; int i; // process number of queries #pragma omp parallel for private (i, thid) for(bid = 0; bid < count; bid++){ ____num_tasks[omp_get_thread_num()]++; { // process levels of the tree for(i = 0; i < maxheight; i++){ // process all leaves at each level for(thid = 0; thid < threadsPerBlock; thid++){ // if value is between the two keys if((knodes[currKnode[bid]].keys[thid]) <= keys[bid] && (knodes[currKnode[bid]].keys[thid+1] > keys[bid])){ // this conditional statement is inserted to avoid a crash due to a bug in the original code: // "offset[bid]", calculated below, addresses knodes[] in the next iteration; when it goes out of bounds it causes a segmentation fault // more specifically, some values saved into knodes->indices in the main function are out of bounds of the knodes[] array they address if(knodes[offset[bid]].indices[thid] < knodes_elem){ offset[bid] = knodes[offset[bid]].indices[thid]; } } } // set for next tree level currKnode[bid] = offset[bid]; } // At this point, we have a candidate leaf node which may contain // the target record. Check each key to hopefully find the record // process all leaves at each level for(thid = 0; thid < threadsPerBlock; thid++){ if(knodes[currKnode[bid]].keys[thid] == keys[bid]){ ans[bid].value = records[knodes[currKnode[bid]].indices[thid]].value; } } } ; } time2 = get_time(); } //======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Time spent in different stages of CPU/MCPU KERNEL:\n"); printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time2-time0) * 100); printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time2-time0) * 100); printf("Total time:\n"); printf("%.12f s\n", (float) (time2-time0) / 1000000); } //========================================================================================================================================================================================================200 // END //========================================================================================================================================================================================================200 // #ifdef __cplusplus // } // #endif
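____num_tasks above is per-thread instrumentation added by the load-balancing transformation: each OpenMP thread counts how many of the count queries it executed. A short sketch of how those counters could be inspected after kernel_cpu() returns; the print_task_balance helper is hypothetical and not part of the benchmark.

/* Hypothetical inspection helper, compiled alongside kernel_cpu.balance.c. */
#include <stdio.h>
#include <omp.h>

extern int ____num_tasks[32]; /* defined in kernel_cpu.balance.c */

void print_task_balance(void)
{
  int t;
  int n = omp_get_max_threads();
  if (n > 32) { n = 32; } /* the counter array is sized for up to 32 threads */
  for (t = 0; t < n; t++) {
    printf("thread %2d processed %d queries\n", t, ____num_tasks[t]);
  }
}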
algorithm.h
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & Newcastle University for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_ALGORITHM_H_ #define CORE_ALGORITHM_H_ #include <cmath> #include <cstdint> namespace bdm { // ----------------------------------------------------------------------------- /// Calculate work-efficient inclusive prefix sum. /// Calculation is parallel and in-place. template <typename T> void InPlaceParallelPrefixSum(T& v, uint64_t n) { if (n < 2) { return; } // upsweep uint64_t logn = static_cast<uint64_t>(std::ceil(std::log2(n))); for (uint64_t d = 0; d < logn; ++d) { uint64_t stride = 1 << (d + 1); uint64_t delta = 1 << d; #pragma omp parallel for for (uint64_t i = delta - 1; i < n - delta; i += stride) { v[i + delta] += v[i]; } } // downsweep for (uint64_t d = 0; d < logn - 1; ++d) { uint64_t stride = 1 << (logn - d - 1); uint64_t delta = 1 << (logn - d - 2); #pragma omp parallel for for (uint64_t i = stride - 1; i < n - delta; i += stride) { v[i + delta] += v[i]; } } } // ----------------------------------------------------------------------------- /// Calculate exclusive prefix sum in-place. /// n must be <= v->size() - 1 /// i.e. the vector must contain at least n + 1 elements, since the loop /// reads and writes v[0]..v[n] template <typename T> void ExclusivePrefixSum(T* v, uint64_t n) { auto tmp = (*v)[0]; (*v)[0] = 0; for (uint64_t i = 1; i <= n; ++i) { auto result = (*v)[i - 1] + tmp; tmp = (*v)[i]; (*v)[i] = result; } } // ----------------------------------------------------------------------------- // If search_val is found in container, return the right-most occurrence. // If not, return the index of the right-most element that is smaller. // If no smaller element exists, return index 0 template <typename TSearch, typename TContainer> uint64_t BinarySearch(const TSearch& search_val, const TContainer& container, uint64_t from, uint64_t to) { if (to <= from) { if (container[from] != search_val && from > 0) { from--; } return from; } auto m = (from + to) / 2; if (container[m] == search_val) { if (m + 1 <= to && container[m + 1] == search_val) { return BinarySearch(search_val, container, m + 1, to); } return m; } else if (container[m] > search_val) { return BinarySearch(search_val, container, from, m); } else { return BinarySearch(search_val, container, m + 1, to); } } } // namespace bdm #endif // CORE_ALGORITHM_H_
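A small, self-contained usage sketch for the three helpers above (not part of BioDynaMo; the include path is an assumption). Note that ExclusivePrefixSum needs one spare trailing element, and BinarySearch returns the right-most index of the search value in a sorted container.

// Usage sketch under assumed include path "core/algorithm.h".
#include <cassert>
#include <cstdint>
#include <vector>
#include "core/algorithm.h"

int main() {
  // Inclusive prefix sum, computed in parallel and in place.
  std::vector<uint64_t> v = {1, 2, 3, 4};
  bdm::InPlaceParallelPrefixSum(v, v.size());
  assert(v == (std::vector<uint64_t>{1, 3, 6, 10}));

  // Exclusive prefix sum over the first 4 elements; w[4] is the spare slot
  // required by the documented precondition and receives the total.
  std::vector<uint64_t> w = {1, 2, 3, 4, 0};
  bdm::ExclusivePrefixSum(&w, 4);
  assert(w == (std::vector<uint64_t>{0, 1, 3, 6, 10}));

  // Right-most occurrence of 3 in a sorted container is at index 2.
  std::vector<uint64_t> s = {1, 3, 3, 7};
  assert(bdm::BinarySearch(uint64_t{3}, s, 0, s.size() - 1) == 2);
  return 0;
}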
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static size_t GetImageChannels(const Image *image) { register ssize_t i; size_t channels; channels=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) != 0) channels++; } return(channels == 0 ? 
(size_t) 1 : channels); } MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image, const MetricType metric,double *distortion,ExceptionInfo *exception) { CacheView *highlight_view, *image_view, *reconstruct_view; const char *artifact; double fuzz; Image *clone_image, *difference_image, *highlight_image; MagickBooleanType status; PixelInfo highlight, lowlight, masklight; RectangleInfo geometry; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageDistortion(image,reconstruct_image,metric,distortion, exception); if (status == MagickFalse) return((Image *) NULL); columns=MagickMax(image->columns,reconstruct_image->columns); rows=MagickMax(image->rows,reconstruct_image->rows); SetGeometry(image,&geometry); geometry.width=columns; geometry.height=rows; clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception); difference_image=ExtentImage(clone_image,&geometry,exception); clone_image=DestroyImage(clone_image); if (difference_image == (Image *) NULL) return((Image *) NULL); (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception); highlight_image=CloneImage(image,columns,rows,MagickTrue,exception); if (highlight_image == (Image *) NULL) { difference_image=DestroyImage(difference_image); return((Image *) NULL); } status=SetImageStorageClass(highlight_image,DirectClass,exception); if (status == MagickFalse) { difference_image=DestroyImage(difference_image); highlight_image=DestroyImage(highlight_image); return((Image *) NULL); } (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception); (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception); (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception); artifact=GetImageArtifact(image,"compare:highlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception); (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception); artifact=GetImageArtifact(image,"compare:lowlight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception); (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception); artifact=GetImageArtifact(image,"compare:masklight-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception); /* Generate difference image. 
*/ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); highlight_view=AcquireAuthenticCacheView(highlight_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,highlight_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p, *magick_restrict q; register Quantum *magick_restrict r; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) || (r == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickStatusType difference; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { SetPixelViaPixelInfo(highlight_image,&masklight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q); else distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); if ((distance*distance) > fuzz) { difference=MagickTrue; break; } } if (difference == MagickFalse) SetPixelViaPixelInfo(highlight_image,&lowlight,r); else SetPixelViaPixelInfo(highlight_image,&highlight,r); p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); r+=GetPixelChannels(highlight_image); } sync=SyncCacheViewAuthenticPixels(highlight_view,exception); if (sync == MagickFalse) status=MagickFalse; } highlight_view=DestroyCacheView(highlight_view); reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); (void) CompositeImage(difference_image,highlight_image,image->compose, MagickTrue,0,0,exception); highlight_image=DestroyImage(highlight_image); if (status == MagickFalse) difference_image=DestroyImage(difference_image); return(difference_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortion() compares one or more pixel channels of an image to a % reconstructed image and returns the specified distortion metric. % % The format of the GetImageDistortion method is: % % MagickBooleanType GetImageDistortion(const Image *image, % const Image *reconstruct_image,const MetricType metric, % double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* Compute the absolute difference in pixels between two images. */ status=MagickTrue; fuzz=(double) MagickMin(GetPixelChannels(image), GetPixelChannels(reconstruct_image))* GetFuzzyColorDistance(image,reconstruct_image); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, distance, Sa; MagickBooleanType difference; register ssize_t i; difference=MagickFalse; distance=0.0; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q); else pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); distance+=pixel*pixel; if (distance > fuzz) { channel_distortion[i]++; difference=MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for 
schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image, channel,q)); else distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double 
Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) p[i]- GetPixelChannel(reconstruct_image,channel,q)); else distance=QuantumScale*fabs(Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q)); channel_distortion[i]+=distance; channel_distortion[CompositePixelChannel]+=distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=fabs((double) p[i]- GetPixelChannel(reconstruct_image,channel,q)); else distance=fabs(Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q)); distortion[i]+=distance; distortion[CompositePixelChannel]+=distance; 
mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; register ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image, channel,q)); else distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=GetImageChannels(image); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image *reconstruct_image,double *distortion, ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, 
*reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t columns, rows; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. */ image_statistics=GetImageStatistics(image,exception); reconstruct_statistics=GetImageStatistics(reconstruct_image,exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return(MagickFalse); } status=MagickTrue; progress=0; for (i=0; i <= MaxPixelChannels; i++) distortion[i]=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } area=PerceptibleReciprocal(area); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) { distortion[i]+=area*QuantumScale*(p[i]- image_statistics[channel].mean)*(GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } else { distortion[i]+=area*QuantumScale*(Sa*p[i]- image_statistics[channel].mean)*(Da*GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; 
proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. */ distortion[CompositePixelChannel]=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel = GetPixelChannelChannel(image,i); gamma=image_statistics[channel].standard_deviation* reconstruct_statistics[channel].standard_deviation; gamma=PerceptibleReciprocal(gamma); distortion[i]=QuantumRange*gamma*distortion[i]; distortion[CompositePixelChannel]+=distortion[i]*distortion[i]; } distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/ GetImageChannels(image)); /* Free resources. */ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; register ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) p[i]- GetPixelChannel(reconstruct_image,channel,q)); else distance=QuantumScale*fabs(Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q)); if (distance > channel_distortion[i]) channel_distortion[i]=distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel]=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) if 
(channel_distortion[j] > distortion[j]) distortion[j]=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) if (fabs(distortion[i]) < MagickEpsilon) distortion[i]=INFINITY; else distortion[i]=20.0*MagickLog10(1.0/sqrt(distortion[i])); return(status); } static MagickBooleanType GetPerceptualHashDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { ChannelPerceptualHash *channel_phash, *reconstruct_phash; const char *artifact; MagickBooleanType normalize; ssize_t channel; /* Compute perceptual hash in the sRGB colorspace. */ channel_phash=GetImagePerceptualHash(image,exception); if (channel_phash == (ChannelPerceptualHash *) NULL) return(MagickFalse); reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( channel_phash); return(MagickFalse); } artifact=GetImageArtifact(image,"phash:normalize"); normalize=(artifact == (const char *) NULL) || (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (channel=0; channel < MaxPixelChannels; channel++) { double difference; register ssize_t i; difference=0.0; for (i=0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; register ssize_t j; for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++) { alpha=channel_phash[channel].phash[j][i]; beta=reconstruct_phash[channel].phash[j][i]; if (normalize == MagickFalse) difference+=(beta-alpha)*(beta-alpha); else difference=sqrt((beta-alpha)*(beta-alpha)/ channel_phash[0].number_channels); } } distortion[channel]+=difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel]+=difference; } /* Free resources. 
*/ reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash); return(MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=sqrt(distortion[i]); return(status); } static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { #define SSIMRadius 5.0 #define SSIMSigma 1.5 #define SSIMBlocksize 8 #define SSIMK1 0.01 #define SSIMK2 0.03 #define SSIML 1.0 CacheView *image_view, *reconstruct_view; char geometry[MagickPathExtent]; const char *artifact; double c1, c2, radius, sigma; KernelInfo *kernel_info; MagickBooleanType status; register ssize_t i; size_t columns, rows; ssize_t y; /* Compute structural similarity index @ https://en.wikipedia.org/wiki/Structural_similarity. */ radius=SSIMRadius; artifact=GetImageArtifact(image,"compare:ssim-radius"); if (artifact != (const char *) NULL) radius=StringToDouble(artifact,(char **) NULL); sigma=SSIMSigma; artifact=GetImageArtifact(image,"compare:ssim-sigma"); if (artifact != (const char *) NULL) sigma=StringToDouble(artifact,(char **) NULL); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); c1=pow(SSIMK1*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k1"); if (artifact != (const char *) NULL) c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); c2=pow(SSIMK2*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k2"); if (artifact != (const char *) NULL) c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,reconstruct_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y- ((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/ 2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double x_pixel_mu[MaxPixelChannels+1], x_pixel_sigma_squared[MaxPixelChannels+1], xy_sigma[MaxPixelChannels+1], y_pixel_mu[MaxPixelChannels+1], y_pixel_sigma_squared[MaxPixelChannels+1]; register const Quantum *magick_restrict reference, *magick_restrict target; register 
double *k; ssize_t v; (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu)); (void) memset(x_pixel_sigma_squared,0, sizeof(x_pixel_sigma_squared)); (void) memset(xy_sigma,0,sizeof(xy_sigma)); (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu)); (void) memset(y_pixel_sigma_squared,0, sizeof(y_pixel_sigma_squared)); k=kernel_info->values; reference=p; target=q; for (v=0; v < (ssize_t) kernel_info->height; v++) { register ssize_t u; for (u=0; u < (ssize_t) kernel_info->width; u++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double x_pixel, y_pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel=QuantumScale*reference[i]; x_pixel_mu[i]+=(*k)*x_pixel; x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel; y_pixel=QuantumScale* GetPixelChannel(reconstruct_image,channel,target); y_pixel_mu[i]+=(*k)*y_pixel; y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel; xy_sigma[i]+=(*k)*x_pixel*y_pixel; } k++; reference+=GetPixelChannels(image); target+=GetPixelChannels(reconstruct_image); } reference+=GetPixelChannels(image)*columns; target+=GetPixelChannels(reconstruct_image)*columns; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double ssim, x_pixel_mu_squared, x_pixel_sigmas_squared, xy_mu, xy_sigmas, y_pixel_mu_squared, y_pixel_sigmas_squared; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i]; y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i]; xy_mu=x_pixel_mu[i]*y_pixel_mu[i]; xy_sigmas=xy_sigma[i]-xy_mu; x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared; y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared; ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/ ((x_pixel_mu_squared+y_pixel_mu_squared+c1)* (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2)); channel_distortion[i]+=ssim; channel_distortion[CompositePixelChannel]+=ssim; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion) #endif for (i=0; i <= MaxPixelChannels; i++) distortion[i]+=channel_distortion[i]; } image_view=DestroyCacheView(image_view); reconstruct_view=DestroyCacheView(reconstruct_view); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; distortion[i]/=((double) columns*rows); } distortion[CompositePixelChannel]/=((double) columns*rows); distortion[CompositePixelChannel]/=(double) GetImageChannels(image); kernel_info=DestroyKernelInfo(kernel_info); return(status); } static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; register ssize_t i;
status=GetStructuralSimilarityDistortion(image,reconstruct_image, distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=(1.0-(distortion[i]))/2.0; return(status); } MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } *distortion=channel_distortion[CompositePixelChannel]; channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(), *distortion); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortions() compares the pixel channels of an image to a % reconstructed image and returns the specified distortion metric for each % channel. 
% % The format of the GetImageDistortions method is: % % double *GetImageDistortions(const Image *image, % const Image *reconstruct_image,const MetricType metric, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o exception: return any errors or warnings in this structure. % */ MagickExport double *GetImageDistortions(Image *image, const Image *reconstruct_image,const MetricType metric, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); status=MagickTrue; switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } if (status == MagickFalse) { channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); return((double *) NULL); } return(channel_distortion); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e s E q u a l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImagesEqual() compares the pixels of two images and returns immediately % if any pixel is not identical.
% % The format of the IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(const Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImagesEqual(const Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image, channel,q)); if (distance >= MagickEpsilon) break; } if (i < (ssize_t) GetPixelChannels(image)) break; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (x < (ssize_t) columns) break; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(y < (ssize_t) rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r M e t r i c % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorMetric() measures the difference between colors at each pixel % location of two images. A return value other than 0 (MagickTrue) means the % colors match exactly. Otherwise an error measure is computed by summing over % all pixels in an image the distance squared in RGB space between each image % pixel and its corresponding pixel in the reconstruct image. The error % measure is assigned to these image members: % % o mean_error_per_pixel: The mean error for any single pixel in % the image. % % o normalized_mean_error: The normalized mean quantization error for % any single pixel in the image. This distance measure is normalized to % a range between 0 and 1. It is independent of the range of red, green, % and blue values in the image. % % o normalized_maximum_error: The normalized maximum quantization % error for any single pixel in the image. This distance measure is % normalized to a range between 0 and 1. It is independent of the range % of red, green, and blue values in the image.
% % A small normalized mean square error, accessed as % image->normalized_mean_error, suggests the images are very similar in % spatial layout and color. % % The format of the SetImageColorMetric method is: % % MagickBooleanType SetImageColorMetric(Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageColorMetric(Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area, maximum_error, mean_error, mean_error_per_pixel; MagickBooleanType status; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); area=0.0; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image, channel,q)); if (distance >= MagickEpsilon) { mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; } area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area); image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale* mean_error/area); image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error); status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i m i l a r i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SimilarityImage() compares the reference image against the image and returns % the best match offset. In addition, it returns a similarity image such that % an exact match location is completely white, a complete mismatch is black, % and partial matches appear as intermediate gray levels.
% % The format of the SimilarityImage method is: % % Image *SimilarityImage(const Image *image,const Image *reference, % const MetricType metric,const double similarity_threshold, % RectangleInfo *offset,double *similarity,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reference: find an area of the image that closely resembles this image. % % o metric: the metric. % % o similarity_threshold: minimum distortion for (sub)image match. % % o offset: the best match offset of the reference image within the image. % % o similarity: the computed similarity between the images. % % o exception: return any errors or warnings in this structure. % */ static double GetSimilarityMetric(const Image *image,const Image *reference, const MetricType metric,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { double distortion; Image *similarity_image; MagickBooleanType status; RectangleInfo geometry; SetGeometry(reference,&geometry); geometry.x=x_offset; geometry.y=y_offset; similarity_image=CropImage(image,&geometry,exception); if (similarity_image == (Image *) NULL) return(0.0); distortion=0.0; status=GetImageDistortion(similarity_image,reference,metric,&distortion, exception); similarity_image=DestroyImage(similarity_image); if (status == MagickFalse) return(0.0); return(distortion); } MagickExport Image *SimilarityImage(const Image *image,const Image *reference, const MetricType metric,const double similarity_threshold, RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *similarity_view; Image *similarity_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(offset != (RectangleInfo *) NULL); SetGeometry(reference,offset); *similarity_metric=MagickMaximumValue; similarity_image=CloneImage(image,image->columns-reference->columns+1, image->rows-reference->rows+1,MagickTrue,exception); if (similarity_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(similarity_image,DirectClass,exception); if (status == MagickFalse) { similarity_image=DestroyImage(similarity_image); return((Image *) NULL); } (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel, exception); /* Measure similarity of reference image against image.
*/ status=MagickTrue; progress=0; similarity_view=AcquireAuthenticCacheView(similarity_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ shared(progress,status,similarity_metric) \ magick_number_threads(image,image,image->rows-reference->rows+1,1) #endif for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++) { double similarity; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) continue; q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++) { register ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) break; similarity=GetSimilarityMetric(image,reference,metric,x,y,exception); if ((metric == NormalizedCrossCorrelationErrorMetric) || (metric == UndefinedErrorMetric)) similarity=1.0-similarity; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SimilarityImage) #endif if (similarity < *similarity_metric) { offset->x=x; offset->y=y; *similarity_metric=similarity; } if (metric == PerceptualHashErrorMetric) similarity=MagickMin(0.01*similarity,1.0); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image, channel); if ((traits == UndefinedPixelTrait) || (similarity_traits == UndefinedPixelTrait) || ((similarity_traits & UpdatePixelTrait) == 0)) continue; SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange- QuantumRange*similarity),q); } q+=GetPixelChannels(similarity_image); } if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SimilarityImage) #endif proceed=SetImageProgress(image,SimilarityImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } similarity_view=DestroyCacheView(similarity_view); if (status == MagickFalse) similarity_image=DestroyImage(similarity_image); return(similarity_image); }
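/*
  Editor's usage sketch (not part of the original source): shows how the
  distortion entry points above might be driven by a caller. Assumes
  MagickCore has been initialized elsewhere; the helper name and the
  `image'/`reconstruct' arguments are illustrative.
*/
static double CompareImagesRMSE(Image *image,Image *reconstruct,
  ExceptionInfo *exception)
{
  double
    distortion;

  /* GetImageDistortion() fills in the composite distortion for the metric. */
  distortion=0.0;
  (void) GetImageDistortion(image,reconstruct,RootMeanSquaredErrorMetric,
    &distortion,exception);
  return(distortion);
}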
naive.c
#include "constants.h" /* Lower bound on timings by a single memcpy * gives ~1.19 seconds for 10 iterations of a 175.7 MB page, * i.e. 175.7 MB / ~0.11 s per iteration ~= 1600 MB/s [root@laptopjisk ~]# dmidecode -t 17 # dmidecode 3.1 Getting SMBIOS data from sysfs. SMBIOS 3.0.0 present. Handle 0x0039, DMI type 17, 40 bytes Memory Device Array Handle: 0x0038 Error Information Handle: Not Provided Total Width: 64 bits Data Width: 64 bits Size: 8192 MB Form Factor: Row Of Chips Set: None Locator: System Board Memory Bank Locator: BANK 0 Type: LPDDR3 Type Detail: Synchronous Unbuffered (Unregistered) Speed: 1867 MT/s Manufacturer: Micron Serial Number: 00000000 Asset Tag: 9876543210 Part Number: MT52L1G32D4PG-107 Rank: 2 Configured Clock Speed: 1867 MT/s Minimum Voltage: 1.25 V Maximum Voltage: 1.25 V Configured Voltage: 1.2 V */ /** * Deinterleave (transpose) an IQUV ring buffer page to the ordering needed for FITS files * Note that this is probably a slow function, and is not meant to be run real-time * * data in: tab, channel/4, time/500 packets of time,channel,pn * data out: tab, channel, pol, time * * Suggested use is: * 1. realtime: ringbuffer -> [trigger] -> dada_dbdisk * 2. offline: dada_dbdisk -> ringbuffer -> dadafits * * @param {const unsigned char *} page Ringbuffer page with interleaved data * @param {unsigned char *} transposed Output buffer that receives the transposed data * @param {int} ntabs Number of tabs * @param {int} nchannels Number of channels * @param {int} npackets Number of packets per sequence */ void deinterleave(unsigned char * restrict const page, unsigned char * restrict const transposed, const int ntabs, const int nchannels, const int npackets) { // Walk every sample of the input page and find the matching address in the transposed buffer const int ni = ntabs * nchannels / NCHANS; // number of (tab, channel-group) blocks const int nj = npackets * NSAMPS; // time samples per block const int nk = NCHANS * NPOLS; // values per time sample int i = 0; #pragma omp parallel for for (i = 0; i < ni; i++) { int j; for (j = 0; j < nj; j++) { int k = 0; for (k = 0; k < nk; k++) { transposed[(i * nk + k) * nj + j] = page[(i * nj + j) * nk + k]; } } } }
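/* Editor's usage sketch (not part of the original file): exercises
 * deinterleave() on a small synthetic page. NCHANS, NSAMPS and NPOLS come
 * from constants.h; the ntabs/nchannels/npackets values are illustrative,
 * and nchannels is assumed to be a multiple of NCHANS. Build the driver
 * with the hypothetical flag -DDEINTERLEAVE_DEMO. */
#ifdef DEINTERLEAVE_DEMO
#include <stdlib.h>

int main(void) {
  const int ntabs = 1, nchannels = 4 * NCHANS, npackets = 2;
  /* total elements = ni * nj * nk = ntabs * nchannels * npackets * NSAMPS * NPOLS */
  const size_t n = (size_t) ntabs * nchannels * npackets * NSAMPS * NPOLS;
  unsigned char *page = calloc(n, 1);
  unsigned char *transposed = calloc(n, 1);

  if (page == NULL || transposed == NULL) return 1;
  deinterleave(page, transposed, ntabs, nchannels, npackets);
  free(page);
  free(transposed);
  return 0;
}
#endif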
bucle-for.c
/* gcc -fopenmp -O2 src/bucle-for.c -o bin/bucle-for ./bin/bucle-for 8 */ #include <stdio.h> #include <stdlib.h> #include <omp.h> int main(int argc, char **argv) { int i, n = 9; if(argc < 2) { fprintf(stderr,"\n[ERROR] - Missing number of iterations\n"); exit(-1); } n = atoi(argv[1]); #pragma omp parallel { #pragma omp for for (i=0; i<n; i++) printf("thread %d executes iteration %d of the loop\n", omp_get_thread_num(),i); } return(0); }
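/* Editor's sketch (not part of the original program): the separate
 * parallel/for pair in main() can be fused into a single combined
 * directive with the same semantics; the loop index of an omp for is
 * implicitly private to each thread. Shown as an unused helper purely
 * for illustration. */
static void loop_combined(int n)
{
  int i;
  #pragma omp parallel for schedule(static)
  for (i = 0; i < n; i++)
    printf("thread %d executes iteration %d of the loop\n",
      omp_get_thread_num(), i);
}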
dynwave.c
//----------------------------------------------------------------------------- // dynwave.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/20/14 (5.1.001) // 03/28/14 (5.1.002) // 09/15/14 (5.1.007) // 03/19/15 (5.1.008) // 08/01/16 (5.1.011) // Author: L. Rossman (EPA) // M. Tryby (EPA) // R. Dickinson (CDM) // // Dynamic wave flow routing functions. // // This module solves the dynamic wave flow routing equations using // Picard Iterations (i.e., a method of successive approximations) // to solve the explicit form of the continuity and momentum equations // for conduits. // // Build 5.1.002: // - Only non-ponded nodal surface area is saved for use in // surcharge algorithm. // // Build 5.1.007: // - Node losses added to node outflow variable instead of treated // as a separate item when computing change in node flow volume. // // Build 5.1.008: // - Module-specific constants moved here from project.c. // - Support added for user-specified minimum variable time step. // - Node crown elevations found here instead of in flowrout.c module. // - OpenMP use to parallelize findLinkFlows() & findNodeDepths(). // - Bug in finding complete list of capacity limited links fixed. // // Build 5.1.011: // - Added test for failed memory allocation. // - Fixed illegal array index bug for Ideal Pumps. // //----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include "headers.h" #include <stdlib.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> //(5.1.008) #endif //----------------------------------------------------------------------------- // Constants //----------------------------------------------------------------------------- static const double MINTIMESTEP = 0.001; // min. time step (sec) //(5.1.008) static const double OMEGA = 0.5; // under-relaxation parameter // Constants moved here from project.c // //(5.1.008) const double DEFAULT_SURFAREA = 12.566; // Min. nodal surface area (~4 ft diam.) const double DEFAULT_HEADTOL = 0.005; // Default head tolerance (ft) const int DEFAULT_MAXTRIALS = 8; // Max. trials per time step //----------------------------------------------------------------------------- // Data Structures //----------------------------------------------------------------------------- typedef struct { char converged; // TRUE if iterations for a node done double newSurfArea; // current surface area (ft2) double oldSurfArea; // previous surface area (ft2) double sumdqdh; // sum of dqdh from adjoining links double dYdT; // change in depth w.r.t. 
time (ft/sec) } TXnode; //----------------------------------------------------------------------------- // Shared Variables //----------------------------------------------------------------------------- static double VariableStep; // size of variable time step (sec) static TXnode* Xnode; // extended nodal information static double Omega; // actual under-relaxation parameter static int Steps; // number of Picard iterations //----------------------------------------------------------------------------- // Function declarations //----------------------------------------------------------------------------- static void initRoutingStep(void); static void initNodeStates(void); static void findBypassedLinks(); static void findLimitedLinks(); static void findLinkFlows(double dt); static int isTrueConduit(int link); static void findNonConduitFlow(int link, double dt); static void findNonConduitSurfArea(int link); static double getModPumpFlow(int link, double q, double dt); static void updateNodeFlows(int link); static int findNodeDepths(double dt); static void setNodeDepth(int node, double dt); static double getFloodedDepth(int node, int canPond, double dV, double yNew, double yMax, double dt); static double getVariableStep(double maxStep); static double getLinkStep(double tMin, int *minLink); static double getNodeStep(double tMin, int *minNode); //============================================================================= //// This function was modified for release 5.1.008. //// //(5.1.008) void dynwave_init() // // Input: none // Output: none // Purpose: initializes dynamic wave routing method. // { int i, j; double z; VariableStep = 0.0; Xnode = (TXnode *) calloc(Nobjects[NODE], sizeof(TXnode)); //// Added to release 5.1.011. //// //(5.1.011) if ( Xnode == NULL ) { report_writeErrorMsg(ERR_MEMORY, " Not enough memory for dynamic wave routing."); return; } ////////////////////////////////////// // --- initialize node surface areas & crown elev. for (i = 0; i < Nobjects[NODE]; i++ ) { Xnode[i].newSurfArea = 0.0; Xnode[i].oldSurfArea = 0.0; Node[i].crownElev = Node[i].invertElev; } // --- update node crown elev. & initialize links for (i = 0; i < Nobjects[LINK]; i++) { j = Link[i].node1; z = Node[j].invertElev + Link[i].offset1 + Link[i].xsect.yFull; Node[j].crownElev = MAX(Node[j].crownElev, z); j = Link[i].node2; z = Node[j].invertElev + Link[i].offset2 + Link[i].xsect.yFull; Node[j].crownElev = MAX(Node[j].crownElev, z); Link[i].flowClass = DRY; Link[i].dqdh = 0.0; } } //============================================================================= void dynwave_close() // // Input: none // Output: none // Purpose: frees memory allocated for dynamic wave routing method. // { FREE(Xnode); } //============================================================================= //// New function added to release 5.1.008. //// //(5.1.008) void dynwave_validate() // // Input: none // Output: none // Purpose: adjusts dynamic wave routing options. 
// { if ( MinRouteStep > RouteStep ) MinRouteStep = RouteStep; if ( MinRouteStep < MINTIMESTEP ) MinRouteStep = MINTIMESTEP; if ( MinSurfArea == 0.0 ) MinSurfArea = DEFAULT_SURFAREA; else MinSurfArea /= UCF(LENGTH) * UCF(LENGTH); if ( HeadTol == 0.0 ) HeadTol = DEFAULT_HEADTOL; else HeadTol /= UCF(LENGTH); if ( MaxTrials == 0 ) MaxTrials = DEFAULT_MAXTRIALS; } //============================================================================= double dynwave_getRoutingStep(double fixedStep) // // Input: fixedStep = user-supplied fixed time step (sec) // Output: returns routing time step (sec) // Purpose: computes variable routing time step if applicable. // { // --- use user-supplied fixed step if variable step option turned off // or if its smaller than the min. allowable variable time step if ( CourantFactor == 0.0 ) return fixedStep; if ( fixedStep < MINTIMESTEP ) return fixedStep; // --- at start of simulation (when current variable step is zero) // use the minimum allowable time step if ( VariableStep == 0.0 ) { VariableStep = MinRouteStep; //(5.1.008) } // --- otherwise compute variable step based on current flow solution else VariableStep = getVariableStep(fixedStep); // --- adjust step to be a multiple of a millisecond VariableStep = floor(1000.0 * VariableStep) / 1000.0; return VariableStep; } //============================================================================= int dynwave_execute(double tStep) // // Input: links = array of topo sorted links indexes // tStep = time step (sec) // Output: returns number of iterations used // Purpose: routes flows through drainage network over current time step. // { int converged; // --- initialize if ( ErrorCode ) return 0; Steps = 0; converged = FALSE; Omega = OMEGA; initRoutingStep(); // --- keep iterating until convergence while ( Steps < MaxTrials ) { // --- execute a routing step & check for nodal convergence initNodeStates(); findLinkFlows(tStep); converged = findNodeDepths(tStep); Steps++; if ( Steps > 1 ) { if ( converged ) break; // --- check if link calculations can be skipped in next step findBypassedLinks(); } } if ( !converged ) NonConvergeCount++; // --- identify any capacity-limited conduits findLimitedLinks(); return Steps; } //============================================================================= void initRoutingStep() { int i; for (i = 0; i < Nobjects[NODE]; i++) { Xnode[i].converged = FALSE; Xnode[i].dYdT = 0.0; } for (i = 0; i < Nobjects[LINK]; i++) { Link[i].bypassed = FALSE; Link[i].surfArea1 = 0.0; Link[i].surfArea2 = 0.0; } // --- a2 preserves conduit area from solution at last time step for ( i = 0; i < Nlinks[CONDUIT]; i++) Conduit[i].a2 = Conduit[i].a1; } //============================================================================= void initNodeStates() // // Input: none // Output: none // Purpose: initializes node's surface area, inflow & outflow // { int i; for (i = 0; i < Nobjects[NODE]; i++) { // --- initialize nodal surface area if ( AllowPonding ) { Xnode[i].newSurfArea = node_getPondedArea(i, Node[i].newDepth); } else { Xnode[i].newSurfArea = node_getSurfArea(i, Node[i].newDepth); } if ( Xnode[i].newSurfArea < MinSurfArea ) { Xnode[i].newSurfArea = MinSurfArea; } //// Following code section modified for release 5.1.007 //// //(5.1.007) // --- initialize nodal inflow & outflow Node[i].inflow = 0.0; Node[i].outflow = Node[i].losses; if ( Node[i].newLatFlow >= 0.0 ) { Node[i].inflow += Node[i].newLatFlow; } else { Node[i].outflow -= Node[i].newLatFlow; } Xnode[i].sumdqdh = 0.0; } } 
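//=============================================================================

// Editor's note (a sketch, not SWMM source): the flow and depth updates in
// findNonConduitFlow() and setNodeDepth() below damp the Picard iterations
// by blending the previous iterate with the fresh estimate,
//
//     x <- (1 - omega)*xOld + omega*xNew,   with omega = OMEGA = 0.5,
//
// which suppresses oscillation between successive trials. Written out as a
// standalone helper for clarity:

static double applyUnderRelaxation(double xOld, double xNew, double omega)
//
// Purpose: returns the under-relaxed blend of an old and a new iterate.
//
{
    return (1.0 - omega) * xOld + omega * xNew;
}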
//============================================================================= void findBypassedLinks() { int i; for (i = 0; i < Nobjects[LINK]; i++) { if ( Xnode[Link[i].node1].converged && Xnode[Link[i].node2].converged ) Link[i].bypassed = TRUE; else Link[i].bypassed = FALSE; } } //============================================================================= void findLimitedLinks() // // Input: none // Output: none // Purpose: determines if a conduit link is capacity limited. // { int j, n1, n2, k; double h1, h2; for (j = 0; j < Nobjects[LINK]; j++) { // ---- check only non-dummy conduit links if ( !isTrueConduit(j) ) continue; //(5.1.008) // --- check that upstream end is full k = Link[j].subIndex; Conduit[k].capacityLimited = FALSE; if ( Conduit[k].a1 >= Link[j].xsect.aFull ) { // --- check if HGL slope > conduit slope n1 = Link[j].node1; n2 = Link[j].node2; h1 = Node[n1].newDepth + Node[n1].invertElev; h2 = Node[n2].newDepth + Node[n2].invertElev; if ( (h1 - h2) > fabs(Conduit[k].slope) * Conduit[k].length ) Conduit[k].capacityLimited = TRUE; } } } //============================================================================= void findLinkFlows(double dt) { int i; // --- find new flow in each non-dummy conduit #pragma omp parallel num_threads(NumThreads) //(5.1.008) { #pragma omp for //(5.1.008) for ( i = 0; i < Nobjects[LINK]; i++) { if ( isTrueConduit(i) && !Link[i].bypassed ) dwflow_findConduitFlow(i, Steps, Omega, dt); } } // --- update inflow/outflows for nodes attached to non-dummy conduits for ( i = 0; i < Nobjects[LINK]; i++) { if ( isTrueConduit(i) ) updateNodeFlows(i); } // --- find new flows for all dummy conduits, pumps & regulators for ( i = 0; i < Nobjects[LINK]; i++) { if ( !isTrueConduit(i) ) { if ( !Link[i].bypassed ) findNonConduitFlow(i, dt); updateNodeFlows(i); } } } //============================================================================= int isTrueConduit(int j) { return ( Link[j].type == CONDUIT && Link[j].xsect.type != DUMMY ); } //============================================================================= void findNonConduitFlow(int i, double dt) // // Input: i = link index // dt = time step (sec) // Output: none // Purpose: finds new flow in a non-conduit-type link // { double qLast; // previous link flow (cfs) double qNew; // new link flow (cfs) // --- get link flow from last iteration qLast = Link[i].newFlow; Link[i].dqdh = 0.0; // --- get new inflow to link from its upstream node // (link_getInflow returns 0 if flap gate closed or pump is offline) qNew = link_getInflow(i); if ( Link[i].type == PUMP ) qNew = getModPumpFlow(i, qNew, dt); // --- find surface area at each end of link findNonConduitSurfArea(i); // --- apply under-relaxation with flow from previous iteration; // --- do not allow flow to change direction without first being 0 if ( Steps > 0 && Link[i].type != PUMP ) { qNew = (1.0 - Omega) * qLast + Omega * qNew; if ( qNew * qLast < 0.0 ) qNew = 0.001 * SGN(qNew); } Link[i].newFlow = qNew; } //============================================================================= double getModPumpFlow(int i, double q, double dt) // // Input: i = link index // q = pump flow from pump curve (cfs) // dt = time step (sec) // Output: returns modified pump flow rate (cfs) // Purpose: modifies pump curve pumping rate depending on amount of water // available at pump's inlet node. 
// { int j = Link[i].node1; // pump's inlet node index int k = Link[i].subIndex; // pump's index double newNetInflow; // inflow - outflow rate (cfs) double netFlowVolume; // inflow - outflow volume (ft3) double y; // node depth (ft) if ( q == 0.0 ) return q; // --- case where inlet node is a storage node: // prevent node volume from going negative if ( Node[j].type == STORAGE ) return node_getMaxOutflow(j, q, dt); // --- case where inlet is a non-storage node switch ( Pump[k].type ) { // --- for Type1 pump, a volume is computed for inlet node, // so make sure it doesn't go negative case TYPE1_PUMP: return node_getMaxOutflow(j, q, dt); // --- for other types of pumps, if pumping rate would make depth // at upstream node negative, then set pumping rate = inflow case TYPE2_PUMP: case TYPE4_PUMP: case TYPE3_PUMP: newNetInflow = Node[j].inflow - Node[j].outflow - q; netFlowVolume = 0.5 * (Node[j].oldNetInflow + newNetInflow ) * dt; y = Node[j].oldDepth + netFlowVolume / Xnode[j].newSurfArea; if ( y <= 0.0 ) return Node[j].inflow; } return q; } //============================================================================= void findNonConduitSurfArea(int i) // // Input: i = link index // Output: none // Purpose: finds the surface area contributed by a non-conduit // link to its upstream and downstream nodes. // { if ( Link[i].type == ORIFICE ) { Link[i].surfArea1 = Orifice[Link[i].subIndex].surfArea / 2.; } // --- no surface area for weirs to maintain SWMM 4 compatibility /* else if ( Link[i].type == WEIR ) { Xlink[i].surfArea1 = Weir[Link[i].subIndex].surfArea / 2.; } */ else Link[i].surfArea1 = 0.0; Link[i].surfArea2 = Link[i].surfArea1; if ( Link[i].flowClass == UP_CRITICAL || Node[Link[i].node1].type == STORAGE ) Link[i].surfArea1 = 0.0; if ( Link[i].flowClass == DN_CRITICAL || Node[Link[i].node2].type == STORAGE ) Link[i].surfArea2 = 0.0; } //============================================================================= void updateNodeFlows(int i) // // Input: i = link index // q = link flow rate (cfs) // Output: none // Purpose: updates cumulative inflow & outflow at link's end nodes. // { int k; //(5.1.011) int barrels = 1; int n1 = Link[i].node1; int n2 = Link[i].node2; double q = Link[i].newFlow; double uniformLossRate = 0.0; // --- compute any uniform seepage loss from a conduit if ( Link[i].type == CONDUIT ) { k = Link[i].subIndex; uniformLossRate = Conduit[k].evapLossRate + Conduit[k].seepLossRate; barrels = Conduit[k].barrels; } // --- update total inflow & outflow at upstream/downstream nodes if ( q >= 0.0 ) { Node[n1].outflow += q + uniformLossRate; Node[n2].inflow += q; } else { Node[n1].inflow -= q; Node[n2].outflow -= q - uniformLossRate; } // --- add surf. 
area contributions to upstream/downstream nodes Xnode[Link[i].node1].newSurfArea += Link[i].surfArea1 * barrels; Xnode[Link[i].node2].newSurfArea += Link[i].surfArea2 * barrels; // --- update summed value of dqdh at each end node Xnode[Link[i].node1].sumdqdh += Link[i].dqdh; if ( Link[i].type == PUMP ) { k = Link[i].subIndex; if ( Pump[k].type != TYPE4_PUMP ) //(5.1.011) { Xnode[n2].sumdqdh += Link[i].dqdh; } } else Xnode[n2].sumdqdh += Link[i].dqdh; } //============================================================================= int findNodeDepths(double dt) { int i; int converged; // convergence flag double yOld; // previous node depth (ft) // --- compute outfall depths based on flow in connecting link for ( i = 0; i < Nobjects[LINK]; i++ ) link_setOutfallDepth(i); // --- compute new depth for all non-outfall nodes and determine if // depth change from previous iteration is below tolerance converged = TRUE; #pragma omp parallel num_threads(NumThreads) //(5.1.008) { #pragma omp for private(yOld) //(5.1.008) for ( i = 0; i < Nobjects[NODE]; i++ ) { if ( Node[i].type == OUTFALL ) continue; yOld = Node[i].newDepth; setNodeDepth(i, dt); Xnode[i].converged = TRUE; if ( fabs(yOld - Node[i].newDepth) > HeadTol ) { converged = FALSE; Xnode[i].converged = FALSE; } } } //(5.1.008) return converged; } //============================================================================= void setNodeDepth(int i, double dt) // // Input: i = node index // dt = time step (sec) // Output: none // Purpose: sets depth at non-outfall node after current time step. // { int canPond; // TRUE if node can pond overflows int isPonded; // TRUE if node is currently ponded double dQ; // inflow minus outflow at node (cfs) double dV; // change in node volume (ft3) double dy; // change in node depth (ft) double yMax; // max. depth at node (ft) double yOld; // node depth at previous time step (ft) double yLast; // previous node depth (ft) double yNew; // new node depth (ft) double yCrown; // depth to node crown (ft) double surfArea; // node surface area (ft2) double denom; // denominator term double corr; // correction factor double f; // relative surcharge depth // --- see if node can pond water above it canPond = (AllowPonding && Node[i].pondedArea > 0.0); isPonded = (canPond && Node[i].newDepth > Node[i].fullDepth); // --- initialize values yCrown = Node[i].crownElev - Node[i].invertElev; yOld = Node[i].oldDepth; yLast = Node[i].newDepth; Node[i].overflow = 0.0; surfArea = Xnode[i].newSurfArea; // --- determine average net flow volume into node over the time step dQ = Node[i].inflow - Node[i].outflow; dV = 0.5 * (Node[i].oldNetInflow + dQ) * dt; // --- if node not surcharged, base depth change on surface area if ( yLast <= yCrown || Node[i].type == STORAGE || isPonded ) { dy = dV / surfArea; yNew = yOld + dy; // --- save non-ponded surface area for use in surcharge algorithm //(5.1.002) if ( !isPonded ) Xnode[i].oldSurfArea = surfArea; //(5.1.002) // --- apply under-relaxation to new depth estimate if ( Steps > 0 ) { yNew = (1.0 - Omega) * yLast + Omega * yNew; } // --- don't allow a ponded node to drop much below full depth if ( isPonded && yNew < Node[i].fullDepth ) yNew = Node[i].fullDepth - FUDGE; } // --- if node surcharged, base depth change on dqdh // NOTE: depth change is w.r.t depth from previous // iteration; also, do not apply under-relaxation. 
else { // --- apply correction factor for upstream terminal nodes corr = 1.0; if ( Node[i].degree < 0 ) corr = 0.6; // --- allow surface area from last non-surcharged condition // to influence dqdh if depth close to crown depth denom = Xnode[i].sumdqdh; if ( yLast < 1.25 * yCrown ) { f = (yLast - yCrown) / yCrown; denom += (Xnode[i].oldSurfArea/dt - Xnode[i].sumdqdh) * exp(-15.0 * f); } // --- compute new estimate of node depth if ( denom == 0.0 ) dy = 0.0; else dy = corr * dQ / denom; yNew = yLast + dy; if ( yNew < yCrown ) yNew = yCrown - FUDGE; // --- don't allow a newly ponded node to rise much above full depth if ( canPond && yNew > Node[i].fullDepth ) yNew = Node[i].fullDepth + FUDGE; } // --- depth cannot be negative if ( yNew < 0 ) yNew = 0.0; // --- determine max. non-flooded depth yMax = Node[i].fullDepth; if ( canPond == FALSE ) yMax += Node[i].surDepth; // --- find flooded depth & volume if ( yNew > yMax ) { yNew = getFloodedDepth(i, canPond, dV, yNew, yMax, dt); } else Node[i].newVolume = node_getVolume(i, yNew); // --- compute change in depth w.r.t. time Xnode[i].dYdT = fabs(yNew - yOld) / dt; // --- save new depth for node Node[i].newDepth = yNew; } //============================================================================= double getFloodedDepth(int i, int canPond, double dV, double yNew, double yMax, double dt) // // Input: i = node index // canPond = TRUE if water can pond over node // isPonded = TRUE if water is currently ponded // dV = change in volume over time step (ft3) // yNew = current depth at node (ft) // yMax = max. depth at node before ponding (ft) // dt = time step (sec) // Output: returns depth at node when flooded (ft) // Purpose: computes depth, volume and overflow for a flooded node. // { if ( canPond == FALSE ) { Node[i].overflow = dV / dt; Node[i].newVolume = Node[i].fullVolume; yNew = yMax; } else { Node[i].newVolume = MAX((Node[i].oldVolume+dV), Node[i].fullVolume); Node[i].overflow = (Node[i].newVolume - MAX(Node[i].oldVolume, Node[i].fullVolume)) / dt; } if ( Node[i].overflow < FUDGE ) Node[i].overflow = 0.0; return yNew; } //============================================================================= double getVariableStep(double maxStep) // // Input: maxStep = user-supplied max. time step (sec) // Output: returns time step (sec) // Purpose: finds time step that satisfies stability criterion but // is no greater than the user-supplied max. time step. // { int minLink = -1; // index of link w/ min. time step int minNode = -1; // index of node w/ min. 
time step double tMin; // allowable time step (sec) double tMinLink; // allowable time step for links (sec) double tMinNode; // allowable time step for nodes (sec) // --- find stable time step for links & then nodes tMin = maxStep; tMinLink = getLinkStep(tMin, &minLink); tMinNode = getNodeStep(tMinLink, &minNode); // --- use smaller of the link and node time step tMin = tMinLink; if ( tMinNode < tMin ) { tMin = tMinNode ; minLink = -1; } // --- update count of times the minimum node or link was critical stats_updateCriticalTimeCount(minNode, minLink); // --- don't let time step go below an absolute minimum if ( tMin < MinRouteStep ) tMin = MinRouteStep; //(5.1.008) return tMin; } //============================================================================= double getLinkStep(double tMin, int *minLink) // // Input: tMin = critical time step found so far (sec) // Output: minLink = index of link with critical time step; // returns critical time step (sec) // Purpose: finds critical time step for conduits based on Courant criterion. // { int i; // link index int k; // conduit index double q; // conduit flow (cfs) double t; // time step (sec) double tLink = tMin; // critical link time step (sec) // --- examine each conduit link for ( i = 0; i < Nobjects[LINK]; i++ ) { if ( Link[i].type == CONDUIT ) { // --- skip conduits with negligible flow, area or Fr k = Link[i].subIndex; q = fabs(Link[i].newFlow) / Conduit[k].barrels; if ( q <= 0.05 * Link[i].qFull || Conduit[k].a1 <= FUDGE || Link[i].froude <= 0.01 ) continue; // --- compute time step to satisfy Courant condition t = Link[i].newVolume / Conduit[k].barrels / q; t = t * Conduit[k].modLength / link_getLength(i); t = t * Link[i].froude / (1.0 + Link[i].froude) * CourantFactor; // --- update critical link time step if ( t < tLink ) { tLink = t; *minLink = i; } } } return tLink; } //============================================================================= double getNodeStep(double tMin, int *minNode) // // Input: tMin = critical time step found so far (sec) // Output: minNode = index of node with critical time step; // returns critical time step (sec) // Purpose: finds critical time step for nodes based on max. allowable // projected change in depth. // { int i; // node index double maxDepth; // max. depth allowed at node (ft) double dYdT; // change in depth per unit time (ft/sec) double t1; // time needed to reach depth limit (sec) double tNode = tMin; // critical node time step (sec) // --- find smallest time so that estimated change in nodal depth // does not exceed safety factor * maxdepth for ( i = 0; i < Nobjects[NODE]; i++ ) { // --- see if node can be skipped if ( Node[i].type == OUTFALL ) continue; if ( Node[i].newDepth <= FUDGE) continue; if ( Node[i].newDepth + FUDGE >= Node[i].crownElev - Node[i].invertElev ) continue; // --- define max. allowable depth change using crown elevation maxDepth = (Node[i].crownElev - Node[i].invertElev) * 0.25; if ( maxDepth < FUDGE ) continue; dYdT = Xnode[i].dYdT; if (dYdT < FUDGE ) continue; // --- compute time to reach max. depth & compare with critical time t1 = maxDepth / dYdT; if ( t1 < tNode ) { tNode = t1; *minNode = i; } } return tNode; }
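//=============================================================================

// Editor's note (a sketch, not SWMM source): getLinkStep() above encodes the
// Courant-Friedrichs-Lewy limit dt <= L / (v + c). With Froude number
// Fr = v/c and velocity v = Q/A, we have v + c = v*(1 + Fr)/Fr, so
//
//     L / (v + c) = (A*L/Q) * Fr / (1 + Fr) = (Volume/Q) * Fr / (1 + Fr)
//
// which is exactly the product formed in the code, with modLength/length
// correcting for SWMM's conduit lengthening and CourantFactor applied as
// the user-supplied safety factor.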
precedence_move_generator.h
/*****************************************************************************/ // Copyright (c) 2020-2021 Yuji KOGUMA // Released under the MIT license // https://opensource.org/licenses/mit-license.php /*****************************************************************************/ #ifndef PRINTEMPS_NEIGHBORHOOD_PRECEDENCE_MOVE_GENERATOR_H__ #define PRINTEMPS_NEIGHBORHOOD_PRECEDENCE_MOVE_GENERATOR_H__ #include "abstract_move_generator.h" namespace printemps { namespace neighborhood { /*****************************************************************************/ template <class T_Variable, class T_Expression> class PrecedenceMoveGenerator : public AbstractMoveGenerator<T_Variable, T_Expression> { private: public: /*************************************************************************/ PrecedenceMoveGenerator(void) { /// nothing to do } /*************************************************************************/ virtual ~PrecedenceMoveGenerator(void) { /// nothing to do } /*************************************************************************/ void setup(const std::vector<model_component::Constraint< T_Variable, T_Expression> *> &a_RAW_CONSTRAINT_PTRS) { /** * Exclude constraints which contain fixed variables or selection * variables. */ auto constraint_ptrs = extract_effective_constraint_ptrs(a_RAW_CONSTRAINT_PTRS); /** * Convert constraint objects to BinomialConstraint objects. */ auto binomials = convert_to_binomial_constraints(constraint_ptrs); /** * Setup move objects. */ const int BINOMIALS_SIZE = binomials.size(); this->m_moves.resize(2 * BINOMIALS_SIZE); this->m_flags.resize(2 * BINOMIALS_SIZE); for (auto i = 0; i < BINOMIALS_SIZE; i++) { auto &move = this->m_moves[2 * i]; move.sense = MoveSense::Precedence; move.alterations.emplace_back(binomials[i].variable_ptr_first, 0); move.alterations.emplace_back(binomials[i].variable_ptr_second, 0); move.is_univariable_move = false; move.is_selection_move = false; utility::update_union_set( &(move.related_constraint_ptrs), binomials[i].variable_ptr_first->related_constraint_ptrs()); utility::update_union_set( &(move.related_constraint_ptrs), binomials[i].variable_ptr_second->related_constraint_ptrs()); move.is_special_neighborhood_move = true; move.is_available = true; move.overlap_rate = 0.0; this->m_moves[2 * i + 1] = move; } /** * Setup move updater. 
*/ auto move_updater = // [this, binomials, BINOMIALS_SIZE]( auto * a_moves_ptr, // auto * a_flags, // const bool a_ACCEPT_ALL, // const bool a_ACCEPT_OBJECTIVE_IMPROVABLE, // const bool a_ACCEPT_FEASIBILITY_IMPROVABLE, // [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) { #ifdef _OPENMP #pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static) #endif for (auto i = 0; i < BINOMIALS_SIZE; i++) { { auto index = 2 * i; auto &alterations = (*a_moves_ptr)[index].alterations; alterations[0].second = binomials[i].variable_ptr_first->value() + 1; alterations[1].second = binomials[i].variable_ptr_second->value() + 1; } { auto index = 2 * i + 1; auto &alterations = (*a_moves_ptr)[index].alterations; alterations[0].second = binomials[i].variable_ptr_first->value() - 1; alterations[1].second = binomials[i].variable_ptr_second->value() - 1; } } const int MOVES_SIZE = a_moves_ptr->size(); #ifdef _OPENMP #pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static) #endif for (auto i = 0; i < MOVES_SIZE; i++) { (*a_flags)[i] = 1; if (!(*a_moves_ptr)[i].is_available) { (*a_flags)[i] = 0; continue; } if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) { (*a_flags)[i] = 0; continue; } if (neighborhood::has_bound_violation((*a_moves_ptr)[i])) { (*a_flags)[i] = 0; continue; } if (a_ACCEPT_ALL) { /** nothing to do */ } else { if (a_ACCEPT_OBJECTIVE_IMPROVABLE && neighborhood::has_objective_improvable_variable( (*a_moves_ptr)[i])) { continue; } if (a_ACCEPT_FEASIBILITY_IMPROVABLE && neighborhood::has_feasibility_improvable_variable( (*a_moves_ptr)[i])) { continue; } (*a_flags)[i] = 0; } } }; this->m_move_updater = move_updater; } }; } // namespace neighborhood } // namespace printemps #endif /*****************************************************************************/ // END /*****************************************************************************/
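The updater above only refreshes the target values of each move; the invariant it exploits is that shifting both variables of a binomial precedence constraint by the same amount leaves their difference, and hence the constraint residual, unchanged. A minimal sketch of that invariant (not printemps itself; BinomialSketch is a hypothetical stand-in for BinomialConstraint):

// For a precedence constraint over (x, y), move 2i raises both values by one
// and move 2i+1 lowers both by one, so x - y is preserved by either move.
#include <iostream>
#include <utility>
#include <vector>

struct BinomialSketch {
    int first_value;   // current value of variable_ptr_first
    int second_value;  // current value of variable_ptr_second
};

int main() {
    const BinomialSketch b{3, 5};
    const std::vector<std::pair<int, int>> candidates = {
        {b.first_value + 1, b.second_value + 1},   // move 2i
        {b.first_value - 1, b.second_value - 1}};  // move 2i + 1
    for (const auto &m : candidates)
        std::cout << "x=" << m.first << " y=" << m.second
                  << " (x - y = " << m.first - m.second << ")\n";
}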
GB_unop__isinf_bool_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isinf_bool_fp32) // op(A') function: GB (_unop_tran__isinf_bool_fp32) // C type: bool // A type: float // cast: float cij = (aij) // unaryop: cij = isinf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isinf (x) ; // casting #define GB_CAST(z, aij) \ float z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (aij) ; \ Cx [pC] = isinf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isinf_bool_fp32) ( bool *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = (aij) ; Cx [p] = isinf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = (aij) ; Cx [p] = isinf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isinf_bool_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
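For context, a usage sketch of when SuiteSparse:GraphBLAS would reach a kernel like the one above: applying the predefined GxB_ISINF_FP32 unary op to an FP32 matrix with a BOOL output. Whether this exact hard-coded kernel runs depends on the build (GBCOMPACT and the GxB_NO_* controls noted above); error checking is omitted for brevity:

/* C = isinf (A), which can dispatch to GB (_unop_apply__isinf_bool_fp32) */
#include <math.h>
#include "GraphBLAS.h"

int main (void)
{
    GrB_Matrix A, C ;
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix_new (&A, GrB_FP32, 2, 2) ;
    GrB_Matrix_new (&C, GrB_BOOL, 2, 2) ;
    GrB_Matrix_setElement_FP32 (A, 1.5f, 0, 0) ;
    GrB_Matrix_setElement_FP32 (A, (float) INFINITY, 1, 1) ;
    GrB_Matrix_apply (C, NULL, NULL, GxB_ISINF_FP32, A, NULL) ;
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize () ;
    return (0) ;
}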
residual_based_bdf_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUAL_BASED_BDF_SCHEME ) #define KRATOS_RESIDUAL_BASED_BDF_SCHEME /* System includes */ /* External includes */ /* Project includes */ #include "includes/checks.h" #include "utilities/time_discretization.h" #include "solving_strategies/schemes/residual_based_implicit_time_scheme.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedBDFScheme * @ingroup KratosCore * @brief BDF integration scheme (for dynamic problems) * @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is an \f$ n \f$ step, \f$ n \f$ order accurate method. * This scheme is designed to solve a system of the type: *\f[ * \mathbf{M} \frac{d^2(u_{n0})}{dt^2} + \mathbf{D} \frac{d(u_{n0})}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext} * \f] * * If we call: * * - Second derivative: * -# \f$ \ddot{u}_{ni} \f$ the second derivative at the step i * - First derivative: * -# \f$ \dot{u}_{ni} \f$ the first derivative at the step i * - Variable: * -# \f$ u_{ni} \f$ the variable at the step i * * Then we assume: * \f[ \frac{d^2(u_{n0})}{dt^2} \|t_{n0} = \sum_i c_i \dot{u}_{ni} \f] * \f[ \frac{d(u_{n0})}{dt} \|t_{n0} = \sum_i c_i u_{ni} \f] * with for order 2 (BDF2): * -# \f$ c_0 = \frac{1.5}{dt} \f$ * -# \f$ c_1 = \frac{-2.0}{dt} \f$ * -# \f$ c_2 = \frac{0.5}{dt} \f$ * * The LHS and RHS can be defined as: * \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \frac{d(\dot{u}_{n0})}{dt} - \mathbf{D} \frac{d(u_{n0})}{dt} - \mathbf{K} u_{n0} \f] * and * \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2 \mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f] * @note This implies that elements are expected to be written in terms * of a variable with two time derivatives * <a href="https://mediatum.ub.tum.de/doc/1223319/80942.pdf">Main reference</a> * @todo Create a BibTeX file https://www.stack.nl/~dimitri/doxygen/manual/commands.html#cmdcite * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace> class ResidualBasedBDFScheme : public ResidualBasedImplicitTimeScheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFScheme ); typedef Scheme<TSparseSpace,TDenseSpace> BaseType; typedef typename BaseType::Pointer BaseTypePointer; typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType; typedef typename ImplicitBaseType::TDataType TDataType; typedef typename ImplicitBaseType::DofsArrayType DofsArrayType; typedef typename Element::DofsVectorType DofsVectorType; typedef typename ImplicitBaseType::TSystemMatrixType TSystemMatrixType; typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType; typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef ModelPart::NodesContainerType NodesArrayType; /// Definition of epsilon static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon(); ///@} ///@name Life Cycle ///@{ /** * @brief Constructor.
The BDF method * @param Order The integration order * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and its derivatives */ explicit ResidualBasedBDFScheme(const std::size_t Order = 2) :ImplicitBaseType(), mOrder(Order), mpBDFUtility(Kratos::make_unique<TimeDiscretization::BDF>(Order)) { // Allocate auxiliary memory const std::size_t num_threads = OpenMPUtils::GetNumThreads(); mVector.dotun0.resize(num_threads); mVector.dot2un0.resize(num_threads); // Doing a minimal check KRATOS_ERROR_IF(mOrder < 1) << "ERROR:: Not possible to compute a BDF of order less than 1" << std::endl; // We resize the BDF coefficients if (mBDF.size() != (mOrder + 1)) mBDF.resize(mOrder + 1); } /** Copy Constructor. */ explicit ResidualBasedBDFScheme(ResidualBasedBDFScheme& rOther) :ImplicitBaseType(rOther) ,mOrder(rOther.mOrder) ,mBDF(rOther.mBDF) ,mVector(rOther.mVector) ,mpBDFUtility(nullptr) { Kratos::unique_ptr<TimeDiscretization::BDF> auxiliar_pointer = Kratos::make_unique<TimeDiscretization::BDF>(mOrder); mpBDFUtility.swap(auxiliar_pointer); } /** * Clone */ BaseTypePointer Clone() override { return BaseTypePointer( new ResidualBasedBDFScheme(*this) ); } /** Destructor. */ ~ResidualBasedBDFScheme () override {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Performing the update of the solution * @details Incremental update within Newton iteration. It updates the state variables at the end of the time step * \f[ u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u\f] * @param rModelPart The model of the problem to solve * @param rDofSet Set of all primary variables * @param rA LHS matrix * @param rDx incremental update of primary variables * @param rb RHS Vector */ void Update( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY; // Update of displacement (by DOF) mpDofUpdater->UpdateDofs(rDofSet, rDx); UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb); KRATOS_CATCH( "" ); } /** * @brief Performing the prediction of the solution * @details It predicts the solution for the current step x = xold + vold * Dt * @param rModelPart The model of the problem to solve * @param rDofSet set of all primary variables * @param rA LHS matrix * @param rDx Incremental update of primary variables * @param rb RHS Vector */ void Predict( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY; KRATOS_ERROR << "Calling base BDF class" << std::endl; KRATOS_CATCH( "" ); } /** * @brief It initializes the time step solution. Only needed if the time step solution is restarted * @param rModelPart The model of the problem to solve * @param rA LHS matrix * @param rDx Incremental update of primary variables * @param rb RHS Vector * @todo I cannot find the formula for the higher orders with variable time step.
I tried to deduce it by myself but the result was very unstable */ void InitializeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY; ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); ImplicitBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb); mpBDFUtility->ComputeAndSaveBDFCoefficients(r_current_process_info); mBDF = r_current_process_info[BDF_COEFFICIENTS]; KRATOS_WARNING_IF("ResidualBasedBDFScheme", mOrder > 2) << "For orders higher than 2 the time step is assumed to be constant.\n"; KRATOS_CATCH( "" ); } /** * @brief This function is designed to be called once to perform all the checks needed on the input provided. * @details Checks can be "expensive" as the function is designed to catch user's errors. * @param rModelPart The model of the problem to solve * @return Zero means all ok */ int Check(const ModelPart& rModelPart) const override { KRATOS_TRY; const int err = ImplicitBaseType::Check(rModelPart); if(err!=0) return err; // Verify minimum buffer size KRATOS_ERROR_IF(rModelPart.GetBufferSize() < mOrder + 1) << "Insufficient buffer size. Buffer size should be at least " << mOrder + 1 << ". Current size is " << rModelPart.GetBufferSize() << std::endl; KRATOS_CATCH( "" ); return 0; } /// Free memory allocated by this class. void Clear() override { this->mpDofUpdater->Clear(); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /** * @brief This method provides the default parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "base_bdf_scheme", "integration_order" : 2 })"); // Getting base class default parameters const Parameters base_default_parameters = ImplicitBaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /// Turn back information as a string. std::string Info() const override { return "ResidualBasedBDFScheme"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data.
void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ struct GeneralVectors { std::vector< Vector > dotun0; /// First derivative std::vector< Vector > dot2un0; /// Second derivative }; const std::size_t mOrder; /// The integration order Vector mBDF; /// The BDF coefficients GeneralVectors mVector; /// The structure containing the derivatives Kratos::unique_ptr<TimeDiscretization::BDF> mpBDFUtility; /// Utility to compute BDF coefficients ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief Performing the update of the derivatives * @param rModelPart The model of the problem to solve * @param rDofSet Set of all primary variables * @param rA LHS matrix * @param rDx incremental update of primary variables * @param rb RHS Vector */ inline void UpdateDerivatives( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) { // Updating time derivatives (nodally for efficiency) const int num_nodes = static_cast<int>( rModelPart.Nodes().size() ); // Getting first node iterator const auto it_node_begin = rModelPart.Nodes().begin(); #pragma omp parallel for for(int i = 0; i< num_nodes; ++i) { auto it_node = it_node_begin + i; UpdateFirstDerivative(it_node); UpdateSecondDerivative(it_node); } } /** * @brief Updating first time derivative (velocity) * @param itNode the node iterator */ virtual inline void UpdateFirstDerivative(NodesArrayType::iterator itNode) { KRATOS_ERROR << "Calling base BDF class" << std::endl; } /** * @brief Updating second time derivative (acceleration) * @param itNode the node iterator */ virtual inline void UpdateSecondDerivative(NodesArrayType::iterator itNode) { KRATOS_ERROR << "Calling base BDF class" << std::endl; } /** * @brief It adds the dynamic LHS contribution of the elements * \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2\mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f] * @param rLHS_Contribution The dynamic contribution for the LHS * @param rD The damping matrix * @param rM The mass matrix * @param rCurrentProcessInfo The current process info instance */ void AddDynamicsToLHS( LocalSystemMatrixType& rLHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo ) override { // Adding mass contribution to the dynamic stiffness if (rM.size1() != 0) { // if M matrix declared noalias(rLHS_Contribution) += rM * std::pow(mBDF[0], 2); } // Adding damping contribution if (rD.size1() != 0) { // if D matrix declared noalias(rLHS_Contribution) += rD * mBDF[0]; } } /** * @brief It adds the dynamic RHS contribution of the objects * \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f] * @param rObject The object to compute * @param rRHS_Contribution The dynamic contribution for the RHS * @param rD The damping matrix * @param rM The mass matrix * @param rCurrentProcessInfo The current process info instance */ template <class TObjectType> void TemplateAddDynamicsToRHS( TObjectType& rObject, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo ) { const std::size_t this_thread = OpenMPUtils::ThisThread(); const auto& r_const_obj_ref = rObject; // Adding inertia contribution if (rM.size1() != 0) { r_const_obj_ref.GetSecondDerivativesVector(mVector.dot2un0[this_thread], 0); noalias(rRHS_Contribution) -=
prod(rM, mVector.dot2un0[this_thread]); } // Adding damping contribution if (rD.size1() != 0) { r_const_obj_ref.GetFirstDerivativesVector(mVector.dotun0[this_thread], 0); noalias(rRHS_Contribution) -= prod(rD, mVector.dotun0[this_thread]); } } /** * @brief It adds the dynamic RHS contribution of the elements * \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f] * @param rElement The element to compute * @param rRHS_Contribution The dynamic contribution for the RHS * @param rD The damping matrix * @param rM The mass matrix * @param rCurrentProcessInfo The current process info instance */ void AddDynamicsToRHS( Element& rElement, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo ) override { TemplateAddDynamicsToRHS<Element>(rElement, rRHS_Contribution, rD, rM, rCurrentProcessInfo); } /** * @brief It adds the dynamic RHS contribution of the condition * \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \ddot{u}_{n0} - \mathbf{D} \dot{u}_{n0} - \mathbf{K} u_{n0} \f] * @param rCondition The condition to compute * @param rRHS_Contribution The dynamic contribution for the RHS * @param rD The damping matrix * @param rM The mass matrix * @param rCurrentProcessInfo The current process info instance */ void AddDynamicsToRHS( Condition& rCondition, LocalSystemVectorType& rRHS_Contribution, LocalSystemMatrixType& rD, LocalSystemMatrixType& rM, const ProcessInfo& rCurrentProcessInfo ) override { TemplateAddDynamicsToRHS<Condition>(rCondition, rRHS_Contribution, rD, rM, rCurrentProcessInfo); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ /// Utility class to perform the update after solving the system, will be different in MPI runs. typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedBDFScheme */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_BDF_SCHEME defined */
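A numerical sketch, independent of Kratos, of the BDF2 stencil documented above: with c0 = 1.5/dt, c1 = -2.0/dt, c2 = 0.5/dt, the first derivative follows from the last three solution values (and the second derivative by applying the same stencil to the stored first derivatives):

// BDF2 derivative check on u(t) = t^2; the stencil is exact for quadratics.
#include <array>
#include <iostream>

int main()
{
    const double dt = 0.1;
    const std::array<double, 3> c = {1.5 / dt, -2.0 / dt, 0.5 / dt};
    // u sampled at t = 1.0, 0.9, 0.8; the exact derivative at t = 1 is 2
    const std::array<double, 3> u = {1.0, 0.81, 0.64};
    const double dudt = c[0] * u[0] + c[1] * u[1] + c[2] * u[2];
    std::cout << "BDF2 du/dt = " << dudt << " (exact: 2)\n";
    return 0;
}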
omp3-0.c
#include <stdio.h> int main() { #pragma omp parallel printf("hello, world\n"); }
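A variant of the same program that also reports which thread printed; it requires an OpenMP-enabled compile (e.g. cc -fopenmp omp3-0.c), and the output order is nondeterministic:

/* One "hello" line per thread, tagged with the thread id and team size. */
#include <stdio.h>
#include <omp.h>

int main()
{
#pragma omp parallel
    printf("hello, world from thread %d of %d\n",
           omp_get_thread_num(), omp_get_num_threads());
    return 0;
}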
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 
0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } 
}, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 
0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 
0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 
} } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *, DDSVector4 *, unsigned char *, size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *), WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,const MagickBooleanType, ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *), WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *), WriteUncompressed(Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static
inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; register ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3* end, unsigned char *indices) { DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); 
VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for (i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4* points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; register ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); 
VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const DDSSingleColourLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z)); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, 
DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { const char *option; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; Image *image; MagickBooleanType status, cubemap, volume, read_mipmaps; PixelTrait alpha_trait; size_t n, num_images; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse; image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. 
*/ if (ReadDDSInfo(image, &dds_info) != MagickTrue) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { alpha_trait = UndefinedPixelTrait; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { alpha_trait = BlendPixelTrait; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { alpha_trait = BlendPixelTrait; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; if (num_images < 1) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); option=GetImageOption(image_info,"dds:skip-mipmaps"); if (IsStringFalse(option) != MagickFalse) read_mipmaps=MagickTrue; for (n = 0; n < num_images; n++) { if (n != 0) { /* Start a new image */ if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->alpha_trait=alpha_trait; image->compression=compression; image->columns=dds_info.width; image->rows=dds_info.height; image->storage_class=DirectClass; image->endian=LSBEndian; image->depth=8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception); if (status == MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if 
(hdr_size != 124) return MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & required) != required) return MagickFalse; dds_info->height = ReadBlobLSBLong(image); dds_info->width = ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ return MagickTrue; } static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y, DDSColors colors,size_t bits,Quantum *q) { register ssize_t i; ssize_t j; unsigned char code; for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q); if ((colors.a[code] != 0) && (image->alpha_trait == UndefinedPixelTrait)) return(MagickFalse); q+=GetPixelChannels(image); } } } return(MagickTrue); } static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception) { MagickBooleanType status; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } status=MagickTrue; if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(MagickFalse); image=SyncNextImageInList(image); status=SetImageExtent(image,w,h,exception); if (status == MagickFalse) break; status=decoder(image,dds_info,exception); if (status == MagickFalse) break; if ((w == 1) && (h == 1)) break; w=DIV2(w); h=DIV2(h); } } return(status); } static MagickBooleanType ReadDXT1Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t x; size_t bits; ssize_t y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ 
q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read 8 bytes of data from the image */ c0=ReadBlobLSBShort(image); c1=ReadBlobLSBShort(image); bits=ReadBlobLSBLong(image); CalculateColors(c0,c1,&colors,MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse) { /* Correct alpha */ SetImageAlpha(image,QuantumRange,exception); q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q != (Quantum *) NULL) SetDXT1Pixels(image,x,y,colors,bits,q); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); }

static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,8,exception)); }

static MagickBooleanType ReadDXT3Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); }

static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); }

static MagickBooleanType ReadDXT5Pixels(Image *image, DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors
colors; MagickSizeType alpha_bits; register Quantum *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; ssize_t j, y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image, DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadUncompressedRGBPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t x, y; unsigned short color; for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(image,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); if (dds_info->pixelformat.rgb_bitcount == 
32) (void) ReadBlobByte(image); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType,exception); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBAPixels(Image *image, DDSInfo *dds_info,ExceptionInfo *exception) { register Quantum *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleAlphaType,exception); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) image->rows; y++) { q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception); if (q == (Quantum *) NULL) return(MagickFalse); for (x = 0; x < (ssize_t) image->columns; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(image,(color & (1 << 15)) ? 
QuantumRange : 0,q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else if (alphaBits == 2) { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (color >> 8)),q); SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q); } else { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q); } } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info, Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels, exception)); else return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; register ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)w*h*pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported formats. 
% % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); }

static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } }

static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); }

/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteDDSImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image.
% */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if ((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first 
image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) ResetMagickMemory(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); }

static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register ssize_t x; ssize_t i, y, bx, by; register const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ?
(float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } }

static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); }

static inline size_t ClampToLimit(const float value, const size_t limit) { ssize_t result = (ssize_t) (value + 0.5f); if (result < 0) return(0); if ((size_t) result > limit) return(limit); return((size_t) result); }

static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; }

static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } }

static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; register ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,clusterFit, weightByAlpha,exception); if (fromlist == MagickFalse) { if (fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); }

static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); }

static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const Quantum *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } }
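/*
  A minimal standalone sketch of the DXT1 palette math that CalculateColors
  and SetDXT1Pixels above rely on, useful for checking a block decode by
  hand. The expand5/expand6 helpers are illustrative stand-ins for the
  C565_* macros defined earlier in this coder (plain bit replication is
  assumed here); they are not part of the DDS coder itself.
*/
#include <stdio.h>

static unsigned char expand5(unsigned v) { return (unsigned char) ((v << 3) | (v >> 2)); }
static unsigned char expand6(unsigned v) { return (unsigned char) ((v << 2) | (v >> 4)); }

int main(void)
{
  unsigned short c0 = 0xF800, c1 = 0x001F; /* red and blue in RGB565; c0 > c1 selects four-color mode */
  unsigned char r[4], g[4], b[4];
  int i;

  /* Expand the two 16-bit endpoints to 8 bits per channel. */
  r[0] = expand5((c0 >> 11) & 0x1F); g[0] = expand6((c0 >> 5) & 0x3F); b[0] = expand5(c0 & 0x1F);
  r[1] = expand5((c1 >> 11) & 0x1F); g[1] = expand6((c1 >> 5) & 0x3F); b[1] = expand5(c1 & 0x1F);
  /* Four-color mode: entries 2 and 3 sit at 1/3 and 2/3 between the endpoints,
     exactly as in the (ignoreAlpha || c0 > c1) branch of CalculateColors. */
  for (i = 0; i < 2; i++) {
    r[2+i] = (unsigned char) (((2-i)*r[0] + (1+i)*r[1])/3);
    g[2+i] = (unsigned char) (((2-i)*g[0] + (1+i)*g[1])/3);
    b[2+i] = (unsigned char) (((2-i)*b[0] + (1+i)*b[1])/3);
  }
  /* Each 2-bit index in the 32-bit "bits" word of a block selects one entry. */
  for (i = 0; i < 4; i++)
    printf("palette[%d] = (%3u,%3u,%3u)\n", i, r[i], g[i], b[i]);
  return 0;
}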
cgesv.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgesv.c, normal z -> c, Fri Sep 28 17:38:05 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 ******************************************************************************/
int plasma_cgesv(int n, int nrhs,
                 plasma_complex32_t *pA, int lda, int *ipiv,
                 plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_getrf(plasma, PlasmaComplexFloat, n, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cgesv(A, ipiv, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 ******************************************************************************/
void plasma_omp_cgesv(plasma_desc_t A, int *ipiv, plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid B");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions.
    plasma_pcgetrf(A, ipiv, sequence, request);
    plasma_pcgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);
    plasma_pctrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                  1.0, A, B, sequence, request);
    plasma_pctrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, A, B, sequence, request);
}
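/*
  A minimal driver sketch showing the intended call pattern for
  plasma_cgesv() above. plasma_init()/plasma_finalize() are assumed to be
  the usual PLASMA runtime setup/teardown entry points; error checking and
  the actual filling of A and B are elided for brevity. On success, B is
  overwritten with the solution X and ipiv holds the pivot indices from
  the LU factorization.
*/
#include <stdlib.h>
#include "plasma.h"

int main(void)
{
    int n = 1000, nrhs = 1;
    plasma_complex32_t *A = malloc((size_t)n*n*sizeof(plasma_complex32_t));
    plasma_complex32_t *B = malloc((size_t)n*nrhs*sizeof(plasma_complex32_t));
    int *ipiv = malloc((size_t)n*sizeof(int));

    /* ... fill A (lda = n) with the coefficient matrix and B (ldb = n)
       with the right-hand sides ... */

    plasma_init();
    int info = plasma_cgesv(n, nrhs, A, n, ipiv, B, n);
    plasma_finalize();

    free(A); free(B); free(ipiv);
    return info;
}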
utilities.h
// // Created by mario on 04/05/18. // #ifndef SEVN_REVISED_UTILITIES_H #define SEVN_REVISED_UTILITIES_H #include <vector> #include <sstream> #include <iomanip> #include <omp.h> #include <regex> #include <random> #include <algorithm> #include <stdexcept> #include <memory> #include <limits> #include <map> #include <sevnlog.h> using sevnstd::SevnLogging; #define _UNUSED __attribute__ ((unused)) #define openfile(a, b) utilities::_openfile(a, b, __FILE__, __LINE__) class Star; class Binstar; namespace utilities{ //random number generator (OpenMP thread-safe) extern std::mt19937_64 mtrand; #pragma omp threadprivate(utilities::mtrand) //GI 140520 changed this because a firstprivate clause is added in the main files //GI 130321 Changed again because a firstprivate clause could not be used with the current implementation //this means that this version is not compilable with INTEL compiler (beacuse mtrand is defined as extern) //extern std::uniform_real_distribution<double> rand_unif_0_1(0.0, 1.0); //SEVN NAME const std::string SEVN_NAME = "SEVN"; //NOTICE THIS HAS TO BE THE NAME OF THE FOLDER CONTAINING THE CODE //TODO: Add cgs -> SEVN units constants //constants //TODO The timestep and other time property are in Myr, but some costant and the eq. of some process are in yr, should we use only a timescale? ///Fundamental quantitis //TODO Check all the constants to be consistent with these values constexpr double G = 3.92934097327e8; //RSUN^3 YR^-2 MSUN^-1 constexpr double yr_cgs = 3.1557600e7; //yr in s constexpr double Rsun_cgs = 6.95700e10; //rsun in cm constexpr double Msun_cgs = 1.98892e33; //msun in g constexpr double G_cgs = G*Rsun_cgs*Rsun_cgs*Rsun_cgs/(Msun_cgs*yr_cgs*yr_cgs); //cm^3 s^-2 g^-1 constexpr double Sigma_StefBoltz = 7.144796315707217e-17; //LSun^3 RSun^-2 K^-4 constexpr double Myr_to_yr = 1.0e6; constexpr double yr_to_Myr = 1.0e-6; constexpr double AU_to_RSun = 2.150939909288282e2; constexpr double kms_to_RSunyr = 4.5373006461e1; constexpr double LSun_to_Solar = 1.25397997879e1; constexpr double c = 1.36024851337e7; // RSun/yr constexpr double km_to_RSun = 1.4378145219e-6; constexpr double g_to_MSun = 5.02785431289e-34; constexpr double G_over_c2 = G / (c*c); constexpr double G3_over_c5 = (G*G*G)/(c*c*c*c*c); //Scaling for GW processes constexpr double tH = 13.7*1e3; //Hubble time in Myr constexpr double Mchandra = 1.41; //Chandrasekhar mass in Msun const std::string PLACEHOLDER="xxx"; //Standard placeholder for input properties //SY //MAGIC NULL VALUES constexpr double NULL_DOUBLE = -9e30; constexpr int NULL_INT = -999999999; constexpr size_t NULL_SINT = 999999999; //constexpr std::string NULL_STR = "FORZAROMA"; //NOT POSSIBLE in C11 (It will be possible in C20) const std::string NULL_STR = "FORZAROMA"; //MAGIC LARGE AN TINY VALUES constexpr double DIFF_TOLL = 1e-10; //Tollerance on difference between two values constexpr double LARGE = 1e30; constexpr double TINY = 1e-15; constexpr double DOUBLE_EPS = std::numeric_limits<double>::epsilon(); //INT CONSTANT TO HANDLE RETURN FROM EVOLUTION typedef unsigned int evolution; constexpr int SINGLE_STEP_EVOLUTION=0; constexpr int REPEATED_EVOLUTION=1; //INT CONSTANT TO HANDLE RETURN FROM FUNCTIONS typedef unsigned int jump_convergence; constexpr int JUMP_CONVERGE=0; constexpr int JUMP=1; constexpr int NO_JUMP=2; //INT CONST FOR SN EXPLOSION typedef unsigned int sn_explosion; constexpr int SNIA_EXPLODE=1; constexpr int SNII_EXPLODE=2; constexpr int SN_NOT_EXPLODE=0; //INT CONST FOR RLO typedef unsigned int rlo; constexpr int RLO_FALSE=0; 
//RLO is happening or happened constexpr int RLO_TRUE=1; //RLO is happening or happened //bool CONST FOR BINARY EVOLUTION typedef bool bse_evolution; constexpr int BIN_EV_DONE = 1; //This is the return if the properties of the binary have been evolved with the proper evolve method constexpr int BIN_EV_NOT_DONE = 0; //This is the return if the properties of the binary have been evolved without the proper evolve method constexpr int BIN_EV_SETBROKEN = 2; //This is the return if the properties of the binary have not been evolved but a set broken has been called double maxwellian_cdf(double x, double sigma); double maxwellian_pdf(double x, double sigma); inline double R_Schwarzschild(double Mass){return 2.0*G_over_c2*Mass; } //rs=2GM/c^2} ////Phys /** * Estimate the RL follwing the Eggleton formalism (Eq. 53 Hurley02) * @param Mass_primary Mass of the primary in Msun (the star for which we are estimating the RL) * @param Mass_secondary Mass of the secondary in Msun. * @param a Semimajor axis in Rsun * @return Roche Lobe radius in Rsun */ double roche_lobe_Eg(double Mass_primary, double Mass_secondary, double a); /** * Estimate the Alfven radius * @param s Pointer to the star for which we want to calculate the Alfven radius * @param dMdt Accreated mass rate in Msun/yr * @param get0 AIf true use the stellar propertie at the beginning of the timestep * @return The Alfven radius in Rsun */ double R_Alfven(Star *s, double dMdt, bool get0=false); /** * Estimate the Hydrogen mass fraction of the star. * It is the one used in BSE/MOBSE * @param s Pointer to the star * @return Hydrogen mass fraction */ double Hfrac(Star *s); /** * Estimate the maximal accretion rate on an object due to the Eddington limit. * It is estimated using Eq. 67 in Hurley+02. * NOTICE the return will be in units of Myr / YR, while SEVN usually assumes Myr as scale, be careful of conversion. * @param donor Pointer to the star that is donating mass * @param accretor Pointer to the star that is accreting mass * @return return the Eddington accretion rante in Msun/yr */ double dMdt_Eddington_accretion(Star *donor, Star *accretor); /** * Critical angular velocity * @param Mass mass in Msun * @param Rpolar polar radius in Rsun * @return critical angular velocity in yr^-1 */ inline static double omega_crit(double Mass, double Rpolar){ double Reqc = 1.5 *Rpolar; return std::sqrt(utilities::G*Mass/(Reqc*Reqc*Reqc)); } /** * Like wait but it works also outside debug mode */ inline void hardwait(){ std::cout<<"\nWaiting"<<std::endl; std::cin.get(); } /** * Like wait but it works also outside debug mode */ template<typename T, typename... Tail> void hardwait(T head, Tail... tail){ std::cout << head << " "; hardwait(tail...); } /** * Base class for the variadic function wait. * @param _message */ inline void wait(){ #ifdef DEBUG std::cout<<"\nWaiting"<<std::endl; char _fake; std::cin>>_fake; #endif } /** * Variadic function. It prints a message in std::cout (from an argument pack) and wait of a cin input from the user. * @tparam T template param of the value to be written * @tparam Tail template params of the pack of parameters to iterate over * @param head value to be written * @param tail rest of the packs */ template<typename T, typename... Tail> void wait(_UNUSED T head, _UNUSED Tail... 
tail){ #ifdef DEBUG std::cout << head << " "; wait(tail...); #endif } /********** Template variadic function to be used in the log *******/ std::string get_name(Star* s); long get_ID(Star* s); std::string get_name(Binstar* b); long get_ID(Binstar* b); double get_current_time(Star* s); double get_current_time(Binstar* b); template <typename T> void _log_print_core(std::stringstream &ss, T t){ ss<<t; return; } template <typename T, typename... ListP> void _log_print_core(std::stringstream &ss, T t, ListP... args){ ss<<t<<":"; _log_print_core(ss,args...); return; } template <class System, typename... ListP> std::string common_log_print(const std::string& label, System* system, ListP... args){ std::stringstream ss; _log_print_core(ss,args...); std::string id_str=std::to_string(get_ID(system)),time=std::to_string(get_current_time(system)); return get_name(system) +";" +id_str + ";" +label + ";" + time + ";" + ss.str(); } template <class System, typename... ListP> std::string common_log_print(const std::string& label, System* system){ std::string id_str=std::to_string(get_ID(system)),time=std::to_string(get_current_time(system)); return get_name(system) +";" +id_str + ";" +label + ";" + time + ";"; } template <typename... ListP> std::string log_print(const std::string& label, Star* star, ListP... args){ return "S;" + common_log_print(label,star,args...); } template <typename... ListP> std::string log_print(const std::string& label, Binstar *binstar, ListP... args){ return "B;" + common_log_print(label,binstar,args...); } /** * Utility to write Star info in a log format * @param s Pointer to star * @param oldstep if true get the property of the last step * @return a string with ID:Mass:MHE:MCO:Phase:RemnanType */ std::string log_star_info(Star* s, bool oldstep=false); /*************************************************************************************/ unsigned long gen_rseed(); unsigned long gen_rseed(std::random_device &rd); inline const std::string random_keygen(std::mt19937_64 *mtrand){ //std::string allkeys = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; std::string keys_init = "123456789"; //GI71119: MORE SIMILAT TO A source_id GAIA like (it can easier to load and manage as a pure number table) std::string keys = "0123456789"; std::uniform_int_distribution<int> unifo_keys_init(0, (int)keys_init.size()-1); std::uniform_int_distribution<int> unifo_keys(0, (int)keys.size()-1); std::string key = ""; key += keys_init[(unifo_keys_init(*mtrand))]; for(int i = 0; i < 14; i++){ int position = unifo_keys(*mtrand); key += keys[position]; } return key; } std::vector<std::string> split(const std::string& s, char delimiter); template <class T> inline bool isifstream(); template <> inline bool isifstream<std::ifstream>() {return true;} //general functions template <typename T> size_t binary_search(T *array, const size_t left, const size_t right, const T value){ if (right > left+1) { const size_t mid = left + ((right - left)>>1); //std::cout<<left<<" "<<mid<<" "<<right<<std::endl; //std::cout<<"****"<<array[left]<<std::endl; //std::cout<<array[mid]<<std::endl; //std::cout<<array[right]<<std::endl; if(array[mid] > value) return binary_search(array, left, mid, value); else if (array[mid] == value) return mid; else return binary_search(array, mid, right, value); } if(array[left] == value) return left; else if(array[right] == value) return right; else return left; } template <typename T> bool string_is_number(std::string str) { T value; std::stringstream stream(str); stream >> value; 
return (!stream.fail() and stream.eof()) ; } /** * Solve the Kepler equation to find the eccentric anomaly starting from the eccentricity and the mean anomaly. * We use the generalized Newton-Raphson method (Eq. 2.6, Nazeer 2016). * If in some step we have numerical problems with the second derivative, we revert to a simple NR method for that step * @param ecc Eccentricity * @param m Mean anomaly * @param tol tolerance * @param maxit maximum number of iterations * @return Eccentric anomaly */ inline double kepler(const double &ecc, const double &m, const double tol = 1e-6, const int maxit = 50) { // it solves the Kepler equation given the mean anomaly and the eccentricity //We use a generalized Newton-Raphson method that converges cubically (instead of quadratically as the classical Newton-Raphson) //Given the Kepler equation we can have problems if f1 is 0, if f2 is 0 or if f1*f1-2*f*f2 is <=0 //f1=1-ecc*cos(E), and since ecc<1 always, f1>0 and can never be 0. When f2 is 0 or f1*f1-2*f*f2 is <=0 we revert to a simple NR for that step int k = 1; // to check max iterations double em = m; //em = eccentric anomaly (output), m = mean anomaly (input) double sinem = sin(em); //If eccentricity is 0 we have M=E, i.e. the eccentric anomaly is equal to the mean anomaly if (ecc==0) return m; double f = em - ecc*sinem - m; double f1 = 1.0 - ecc*cos(em); double f2 = ecc*sinem; double fsquare=f1*f1 - 2*f*f2; auto check_problem = [&f2,&fsquare](){ return f2==0 or fsquare<=0;}; //Lambda expression to check if we have to fall back to a simple NR if (check_problem()) //If the second derivative is 0 make a step using the first-order Newton method em = em - f/f1; else em = em + (-f1 + sqrt(fsquare))/f2; // iterations while(fabs(f) > tol && k < maxit){ sinem = sin(em); f = em - ecc*sinem - m; f1 = 1.0 - ecc*cos(em); f2 = ecc*sinem; fsquare=f1*f1 - 2*f*f2; if (check_problem()) //If the second derivative is 0 make a step using the first-order Newton method em = em - f/f1; else em = em + (-f1 + sqrt(fsquare))/f2; k++; } return em; } //templates template <typename T> const T s2n(std::string &str, const char* file_input, const int line_input) { SevnLogging svlog; T value; std::stringstream stream(str); stream >> value; if (stream.fail()) svlog.critical("Cannot convert the string " + str + " into a number", file_input, line_input,sevnstd::sevnio_error()); return value; } /** * @tparam T template type. It should be able to accept anything that can be << to a stream object. * @param val value of type T to be converted to a string * @param file_input it should be set to \code{.cpp} __FILE__ \endcode * @param line_input it should be set to \code{.cpp} __LINE__ \endcode * @param precision set precision for the scientific format output of numbers * @return The \p val converted to a string. If val is a number the string is the scientific format with precision * equal to \p precision.
*/ template <typename T> const std::string n2s(T val, const char* file_input, const int line_input, const unsigned int precision=6) { SevnLogging svlog; std::ostringstream stream; stream << std::scientific << std::setprecision(precision); stream << val; if (stream.fail()) svlog.critical("Cannot convert into a string", file_input, line_input); return stream.str(); } template <typename T> void _openfile(T &in, const std::string f, const char* file_input, const int line_input){ SevnLogging svlog; if(in.is_open()) in.close(); if(utilities::isifstream<T>()) in.open(f.c_str(), std::ios::in); else in.open(f.c_str(), std::ios::out); if(!in) svlog.critical("Cannot open file " + f, file_input, line_input,sevnstd::sevnio_error()); } //useful function to sort arrays and indexes inline bool wayToSort(int i, int j) { return i > j; } template <typename T> T dirname2n(std::string str, const char* file_input, const int line_input){ std::size_t found = str.find('.'); if (found == std::string::npos) str.insert(1,"."); return utilities::s2n<T>(str, file_input, line_input); } //GI /** * Function to print all the elements of a vector in a Python numpy style (GI) */ template <typename T> void print_vector(const std::vector<T>& v){ std::cout<< "[ "; for (auto& element : v) std::cout << element << " "; std::cout<< "]"<<std::endl; } //GI /** * Function to generate a complete filename path containing also the directory * @param _folder A string containing the folder path * @param _fname Complete filename including also the .extension if any (_fname=_fname_root . _fname_extension) * @param print_threads If true, the OpenMP thread number will be appended to the filename * @return a string with _folder/_fname_root_nthreads._fname_extension if print_threads=true otherwise _folder/_fname_root._fname_extension */ inline std::string gen_filename(const std::string &_folder, const std::string &_fname, bool print_threads=true){ std::string return_string; std::string folder = _folder.back()=='/' ? _folder.substr(0, _folder.length()-1) : _folder; //GI 141219: To avoid a double / if the input _folder already ends with a / if (print_threads){ std::size_t found_extension; //GI 141219: Simple loop to get only the last occurrence of .
in the string for ( std::size_t pos=0; pos!=std::string::npos; pos=_fname.find('.',pos+1)) found_extension = pos; if (found_extension==0) found_extension = _fname.length(); return_string = folder + "/" + _fname.substr(0, found_extension) + "_" + std::to_string(omp_get_thread_num()) + _fname.substr(found_extension); } else { return_string = folder + "/" + _fname; } return return_string; } /** * Find the slope and the intercept of the line passing through (x1,y1), (x2,y2) * @param x1 x-value of the first point * @param x2 x-value of the second point * @param y1 y-value of the first point * @param y2 y-value of the second point * @param slope this value stores the estimated slope * @param intercept this value stores the estimated intercept * @return */ inline int find_line(const double & x1, const double & x2, const double & y1, const double & y2, double & slope, double & intercept){ slope = (y2 - y1)/(x2-x1); intercept = y2 -slope*x2; return EXIT_SUCCESS; } template<typename T> double rel_difference(T val1, T val2){ return fabs( (val1-val2)/val1 ); } inline void swap_stars(Star* & s1, Star* & s2){ Star *stmp=s1; s1=s2; s2=stmp; } /** * Trim a string of all leading and trailing whitespace * @param s * @return */ inline std::string trim(const std::string& s) { return std::regex_replace(s, std::regex("^[ \\s]+|[ \\s]+$"), std::string("")); } /** * Template function to check if a given element is inside a container. * @tparam T typename of the element to be checked * @tparam Iter typename of the iterator * @param element element to be checked * @param it iterator to the beginning of the container * @param end iterator to the end of the container * @return true if the element is inside the list, otherwise false. */ template<typename T, typename Iter> bool isinlist(T element, Iter it, Iter end){ return std::find(it, end, element)!=end; } /** * Make the string %plife:phase * @param plife Life percentage at a given Phase * @param Phase Phase to consider * @return string %plife:phase */ inline std::string make_pfile_str(const double plife, const size_t Phase, const unsigned int min_precision=6){ //In the following rows we estimate plife and then transform the number to a string to initialise stars. //However, if plife is very close to 0 or 1 and there are not enough digits, we can artificially force plife to be 0 or 1 //Therefore we dynamically set the precision estimating the difference between plife and 0 or 1, taking the exponent of the log10 //and using the next larger integer.
unsigned int precision; //Set the precision to transform plife from number to string unsigned int digit_0= std::ceil(std::abs(std::log10(std::abs(plife-0)))); unsigned int digit_1= std::ceil(std::abs(std::log10(std::abs(plife-1)))); precision=std::max(std::max(digit_0,digit_1),min_precision); //Consistency check std::stringstream tini_ss; //string containing the plife (see below) //Close to plife=1 it is necessary to use a large number of digits to avoid setting plife=1 when transforming to a string tini_ss << "%" << std::setprecision(precision) << plife * 100 << ":" << Phase; //Starting time return tini_ss.str(); } //Interpolator /** * Estimate the y value using 1D linear interpolation * @tparam T type of the interpolated value * @param xp * @param x_interp 1D vector containing the x-values of the interpolating tables (in ascending order) * @param y_interp 1D vector containing the y-values of the interpolating tables * @param equispaced_interval if true, the values in x_interp are equi-spaced * @param ext_raise if true and xp is out of bounds, raise an error; otherwise return the corresponding extremum of y_interp * @return interpolated y value at xp. */ template <typename T> T interpolate_1D(T xp, std::vector<T>& x_interp, std::vector<T>& y_interp, bool equispaced_interval=false, bool ext_raise=false){ if (xp<x_interp[0] and ext_raise) throw sevnstd::sevnerr("Error in interpolate_1D in utility.h: xp is out of boundary."); else if (xp<=x_interp[0]) return y_interp[0]; if (xp>x_interp.back() and ext_raise) throw sevnstd::sevnerr("Error in interpolate_1D in utility.h: xp is out of boundary."); else if (xp>=x_interp.back()) return y_interp.back(); size_t pos; if (equispaced_interval){ double dx = x_interp[1]-x_interp[0]; pos = int( (xp-x_interp[0])/dx); //the int cast returns the floor } else{ pos = binary_search(&x_interp[0], 0, x_interp.size()-1, xp); } return (y_interp[pos+1]-y_interp[pos])/(x_interp[pos+1]-x_interp[pos])*(xp-x_interp[pos])+y_interp[pos]; } /** * Get a portion of a path name up to the given split_string. The split_string is included. * @param path Complete path * @param split_string string pattern where to cut the complete path * @return the substring from 0 to split_string */ inline std::string get_subpath(std::string path, std::string split_string, bool include_split_string=true){ size_t tt= path.find(split_string); std::string subpath; if (include_split_string){ subpath = path.substr(0,tt+split_string.size()); } else{ subpath = path.substr(0,tt); } return subpath; } /** * Transpose a matrix * @tparam T * @param MatrixT Transposed Matrix * @param Matrix Matrix to be transposed */ template <typename T> void transpose(std::vector<std::vector<T>>& MatrixT, std::vector<std::vector<T>>& Matrix){ MatrixT.resize(Matrix.empty() ? 0 : Matrix[0].size()); //one row per column of Matrix (resizing to Matrix.size(), as before, overflowed for non-square matrices) for (auto& Matrix_row : Matrix){ for (int j=0; j<(int)Matrix_row.size(); j++) MatrixT[j].push_back(Matrix_row[j]); } } /** * Generic function to find an element in a vector and also its position.
* @tparam T * @param vecOfElements Vector to look for the element * @param element element to find in the vector * @return The index of the element in the vector if it is found, else -1 */ template < typename T> int findInVector(const std::vector<T> & vecOfElements, const T & element) { int result ; // Find given element in vector auto it = std::find(vecOfElements.begin(), vecOfElements.end(), element); if (it != vecOfElements.end()) { result = distance(vecOfElements.begin(), it); } else { result=-1; } return result; } template <typename Key, typename Value> std::map<Value,Key> flip_map(const std::map<Key,Value> &original_map){ std::map<Value,Key> flipped_map; for (auto& pair : original_map){ flipped_map[pair.second] = pair.first; } return flipped_map; } /** * Define make_unique for pre-C++14. Notice this is the same as the C++14 standard implementation for a single object; * the std also provides another overload for arrays. * @tparam T Type of the unique ptr * @tparam Args * @param args args to initialise the object pointed to by the unique ptr * @return the unique pointer to the element of type T. */ template<typename T, typename... Args> inline std::unique_ptr<T> make_unique(Args&&... args) { return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); } //! Class to handle the generation of a list of values /*! The class usage is based on the methods get(), next() and empty(). get() returns the current value, next() updates the value and empty() returns true when the generator reaches the end. If empty() is true, calling next() or get() throws an out_of_range error. */ class ListGenerator{ private: double vcurrent; double vstep=std::nan(""); double vmin=0.; double vmax=2E30; std::vector<double> vlist; std::vector<double>::iterator begin; std::vector<double>::iterator end; std::vector<double>::iterator current; bool end_of_list=false; inline void initialise_tlist_iterators(){ begin=vlist.begin(); end=vlist.end(); current=begin; } public: ListGenerator(){ vlist={}; initialise_tlist_iterators(); } /** * Constructor based on a step value. The values will be generated from _vstep_min to _vstep_max (which may not be included) * with step _vstep. If _vstep_max is not given the generation can last theoretically forever (_vstep_max default is 2E30). * @param _vstep step value to generate the values * @param _vstep_max maximum value. * @param _vstep_min Starting value. */ explicit ListGenerator(double _vstep, double _vstep_max=std::nan(""), double _vstep_min=std::nan("")) : vstep{_vstep}, vmin{_vstep_min}, vmax{_vstep_max} { if (vstep<=0) throw std::runtime_error("ListGenerator::vstep cannot be negative or zero"); if (std::isnan(vmin)) vmin=vstep; if (std::isnan(vmax)) vmax=2E30; if(vmin >= vmax) throw std::runtime_error("ListGenerator::vmin cannot be equal to or larger than ListGenerator::vmax"); vcurrent=vmin; } /** * Constructor based on a vector of doubles. The class will iterate over all the values in the vector and will be marked as * empty when the end of the vector is reached.
* @param _tlist Vector of doubles */ explicit ListGenerator(const std::vector<double>& _tlist) : vlist{_tlist} { if (!std::is_sorted(vlist.begin(),vlist.end())) throw std::runtime_error("The input vector in the ListGenerator constructor is not sorted"); initialise_tlist_iterators(); if (!vlist.empty()){ vcurrent=*current; vmin=*begin; vmax=*(end-1); } } static std::unique_ptr<ListGenerator> make_unique(double _vstep, double _vstep_max=std::nan(""), double _vstep_min=std::nan("")); static std::unique_ptr<ListGenerator> make_unique(std::vector<double> _vlist); inline double get() const{ if (empty()) throw std::out_of_range("The list of times reached the end. The current value is undefined"); return vcurrent; } inline double get_max() const {return vmax;} inline double get_min() const {return vmin;} inline bool empty() const {return end_of_list;} inline void next(){ //If empty just throw an out_of_range error if (empty()) throw std::out_of_range("The list of times reached the end. The next value is undefined"); //If we are using the vstep implementation and the next step is going beyond vmax flag end_of_list else if(!std::isnan(vstep) and vcurrent+vstep>vmax) end_of_list=true; //If we are using the vstep implementation and the next step is not going beyond vmax just increment vcurrent else if(!std::isnan(vstep)) vcurrent+=vstep; //Now start with the vlist implementation check //Advance the iterator and check if we reached the end else if(++current==end){ end_of_list=true; } //If we are here we are still inside the list: the iterator has already been advanced, so just update vcurrent. else{ vcurrent=*current; } } inline double operator++(){ next(); return vcurrent; } inline double operator++(int){ double old_t=vcurrent; next(); return old_t; } /** * Similar to next, but just return the next value without updating anything * @return The next value if not empty, otherwise std::nan */ inline double forecast() const { //If empty just return nan (forecast, unlike next, does not throw) if (empty()) return std::nan(""); else if(!std::isnan(vstep) and vcurrent+vstep>vmax) return std::nan(""); else if(!std::isnan(vstep)) return vcurrent+vstep; else if(current+1==end) return std::nan(""); else return *(current+1); } }; } #endif //SEVN_REVISED_UTILITIES_H
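// Editor's note: a minimal usage sketch for the kepler() solver and the
// ListGenerator class documented above. It is not part of the original SEVN
// header; the include name "utility.h" and the preprocessor guard are
// assumptions, and the guard keeps the sketch out of normal builds.
#ifdef SEVN_UTILITIES_USAGE_SKETCH
#include <iostream>
#include "utility.h"
int main() {
    // Solve E - e*sin(E) = M for e = 0.5 and M = 1 rad with the generalized
    // Newton-Raphson scheme implemented above; the root is about 1.499 rad.
    const double E = utilities::kepler(0.5, 1.0);
    std::cout << "Eccentric anomaly: " << E << "\n";
    // Step-based generator: yields 0.5, 1.0, 1.5, 2.0, then turns empty.
    utilities::ListGenerator gen(0.5 /*step*/, 2.0 /*max*/, 0.5 /*min*/);
    while (!gen.empty()) { std::cout << gen.get() << " "; gen.next(); }
    std::cout << "\n";
    return 0;
}
#endif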
vel_pr_criteria.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #ifndef KRATOS_VEL_PR_CRITERIA_H #define KRATOS_VEL_PR_CRITERIA_H /* Project includes */ #include "utilities/openmp_utils.h" #include "includes/model_part.h" #include "includes/define.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" namespace Kratos { ///@addtogroup IncompressibleFluidApplication ///@{ ///@name Kratos Classes ///@{ /// Convergence criteria for fluid problems. /** This class implements a convergence control based on nodal velocity and pressure values. The error is evaluated separately for each of them, and relative and absolute tolerances for both must be specified. */ template< class TSparseSpace, class TDenseSpace > class VelPrCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION( VelPrCriteria ); typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef OpenMPUtils::PartitionVector PartitionVector; typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /// Constructor. /** * @param VelRatioTolerance Relative tolerance for velocity error * @param VelAbsTolerance Absolute tolerance for velocity error * @param PrsRatioTolerance Relative tolerance for pressure error * @param PrsAbsTolerance Absolute tolerance for pressure error */ VelPrCriteria( TDataType VelRatioTolerance, TDataType VelAbsTolerance, TDataType PrsRatioTolerance, TDataType PrsAbsTolerance) : ConvergenceCriteria< TSparseSpace, TDenseSpace >() { mVelRatioTolerance = VelRatioTolerance; mVelAbsTolerance = VelAbsTolerance; mPrRatioTolerance = PrsRatioTolerance; mPrAbsTolerance = PrsAbsTolerance; } /// Destructor. ~VelPrCriteria() override {} ///@} ///@name Operators ///@{ /// Compute relative and absolute error. /** * @param rModelPart Reference to the ModelPart containing the fluid problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param A System matrix (unused) * @param Dx Vector of results (variations on nodal variables) * @param b RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& A, const TSystemVectorType& Dx, const TSystemVectorType& b ) override { if (SparseSpaceType::Size(Dx) != 0) //if we are solving for something { // Initialize TDataType VelSolutionNorm = 0.0; TDataType PrSolutionNorm = 0.0; TDataType VelIncreaseNorm = 0.0; TDataType PrIncreaseNorm = 0.0; unsigned int VelDofNum(0),PrDofNum(0); // Set a partition for OpenMP int NumDofs = rDofSet.size(); PartitionVector DofPartition; int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::DivideInPartitions(NumDofs,NumThreads,DofPartition); // Loop over Dofs #pragma omp parallel reduction(+:VelSolutionNorm,PrSolutionNorm,VelIncreaseNorm,PrIncreaseNorm,VelDofNum,PrDofNum) { int k = OpenMPUtils::ThisThread(); typename DofsArrayType::iterator DofBegin = rDofSet.begin() + DofPartition[k]; typename DofsArrayType::iterator DofEnd = rDofSet.begin() + DofPartition[k+1]; std::size_t DofId; TDataType DofValue; TDataType DofIncr; for (typename DofsArrayType::iterator itDof = DofBegin; itDof != DofEnd; ++itDof) { if (itDof->IsFree()) { DofId = itDof->EquationId(); DofValue = itDof->GetSolutionStepValue(0); DofIncr = Dx[DofId]; const auto& CurrVar = itDof->GetVariable(); if ((CurrVar == VELOCITY_X) || (CurrVar == VELOCITY_Y) || (CurrVar == VELOCITY_Z)) { VelSolutionNorm += DofValue * DofValue; VelIncreaseNorm += DofIncr * DofIncr; ++VelDofNum; } else { PrSolutionNorm += DofValue * DofValue; PrIncreaseNorm += DofIncr * DofIncr; ++PrDofNum; } } } } if(VelSolutionNorm == 0.0) VelSolutionNorm = 1.0; if(PrSolutionNorm == 0.0) PrSolutionNorm = 1.0; TDataType VelRatio = sqrt(VelIncreaseNorm/VelSolutionNorm); TDataType PrRatio = sqrt(PrIncreaseNorm/PrSolutionNorm); TDataType VelAbs = sqrt(VelIncreaseNorm)/ static_cast<TDataType>(VelDofNum); TDataType PrAbs = sqrt(PrIncreaseNorm)/ static_cast<TDataType>(PrDofNum); if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { std::cout << "CONVERGENCE CHECK:" << std::endl; std::cout << " VELOC.: ratio = " << VelRatio <<"; exp.ratio = " << mVelRatioTolerance << " abs = " << VelAbs << " exp.abs = " << mVelAbsTolerance << std::endl; std::cout << " PRESS.: ratio = " << PrRatio <<"; exp.ratio = " << mPrRatioTolerance << " abs = " << PrAbs << " exp.abs = " << mPrAbsTolerance << std::endl; } if ( (VelRatio <= mVelRatioTolerance || VelAbs <= mVelAbsTolerance) && (PrRatio <= mPrRatioTolerance || PrAbs <= mPrAbsTolerance) ) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { std::cout << "*** CONVERGENCE IS ACHIEVED ***" << std::endl; } return true; } else { return false; } } else //in this case all the displacements are imposed! { return true; } } /// Initialize this class before using it /** * @param rModelPart Reference to the ModelPart containing the fluid problem. 
(unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; } void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& A, const TSystemVectorType& Dx, const TSystemVectorType& b ) override {} void FinalizeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& A, const TSystemVectorType& Dx, const TSystemVectorType& b ) override {} ///@} // Operations private: TDataType mVelRatioTolerance; TDataType mVelAbsTolerance; TDataType mPrRatioTolerance; TDataType mPrAbsTolerance; }; ///@} // Kratos classes ///@} // Application group } #endif /* KRATOS_VEL_PR_CRITERIA_H */
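// Editor's note: a self-contained sketch (not Kratos code) of the convergence
// test that VelPrCriteria::PostCriteria above applies to each field: a field
// passes when either its relative increment norm or its per-DOF absolute
// increment norm is within tolerance, and both fields must pass.
#include <cmath>
#include <cstddef>

// incr2/sol2 are the summed squares of the DOF increments/values, n the DOF count.
inline bool FieldConverged(double incr2, double sol2, std::size_t n,
                           double ratioTol, double absTol) {
    if (sol2 == 0.0) sol2 = 1.0; // same division-by-zero guard as the criteria
    const double ratio  = std::sqrt(incr2 / sol2);
    const double absErr = std::sqrt(incr2) / static_cast<double>(n);
    return ratio <= ratioTol || absErr <= absTol;
}
// Overall convergence mirrors PostCriteria's return value:
//   FieldConverged(velIncr2, velSol2, nVel, mVelRatioTolerance, mVelAbsTolerance)
//     && FieldConverged(prIncr2, prSol2, nPr, mPrRatioTolerance, mPrAbsTolerance)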
d3.c
#include <stdio.h> #include <stdlib.h> #include <mpi.h> //#include <omp.h> void fillMatrice(int*** matrice, int matrice_size){ *matrice = malloc(matrice_size * sizeof(int*)); for(int i=0; i<matrice_size; ++i){ (*matrice)[i] = malloc(matrice_size * sizeof(int)); for(int j=0; j<matrice_size; ++j){ scanf("%d", &(*matrice)[i][j]); } } } int** initMatrice(int matrice_size){ int** matrice = malloc(matrice_size * sizeof(int*)); for(int i=0; i<matrice_size; ++i){ matrice[i] = malloc(matrice_size * sizeof(int)); for(int j=0; j<matrice_size; ++j){ matrice[i][j] = 0; } } return matrice; } void printMatrice(int** matrice, int matrice_size){ for(int i=0; i<matrice_size; ++i){ for(int j=0; j<matrice_size; ++j){ printf("%5d ", matrice[i][j]); } printf("\n"); } } void arrayBroadcast(int** a, int matrice_size, int master){ for(int i = 0; i < matrice_size; ++i){ MPI_Bcast(a[i], matrice_size, MPI_INT, master, MPI_COMM_WORLD); } } int main (int argc, char *argv[]) { int id, p; int matrice_size, from, to; int** a; int** b; double elapsed_time; int *rcvcounts; int *displs; int *positions; MPI_Init(&argc, &argv); MPI_Barrier(MPI_COMM_WORLD); MPI_Comm_rank(MPI_COMM_WORLD, &id); MPI_Comm_size(MPI_COMM_WORLD, &p); rcvcounts = calloc(p, sizeof(int)); displs = calloc(p, sizeof(int)); positions = calloc(p, sizeof(int)); if(id == 0){ scanf("%d", &matrice_size); fillMatrice(&a, matrice_size); fillMatrice(&b, matrice_size); elapsed_time = -MPI_Wtime(); p = p>matrice_size ? matrice_size : p; int modulo = matrice_size % p; int nb = 0; for(int i = 0; i < p ;++i){ int numberElements = matrice_size/p; if(modulo > 0){ ++numberElements; --modulo; } if(i == 0) positions[i] = numberElements; else positions[i] = positions[i-1] + numberElements; numberElements *= matrice_size; rcvcounts[i] = numberElements; displs[i] = nb; nb += numberElements; } } MPI_Bcast(&matrice_size, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(rcvcounts, p, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(displs, p, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(positions, p, MPI_INT, 0, MPI_COMM_WORLD); if(id != 0){ a=initMatrice(matrice_size); b=initMatrice(matrice_size); } int *c = malloc(sizeof(int) * matrice_size * matrice_size); arrayBroadcast(a, matrice_size, 0); arrayBroadcast(b, matrice_size, 0); to = positions[id]; from = to - (rcvcounts[id] / matrice_size); // #pragma omp parallel for for(int i =from; i<to; ++i) { for(int j = 0; j<matrice_size; ++j){ int su = 0; for(int k=0; k<matrice_size; ++k){ su += a[i][k] * b[k][j]; } c[i *matrice_size + j] = su; } } MPI_Gatherv(&c[from * matrice_size], rcvcounts[id], MPI_INT, c, rcvcounts, displs, MPI_INT, 0, MPI_COMM_WORLD); if(id == 0 && argc > 1 && argv[1][0] == '-' && argv[1][1] == 'p'){ printf("-------------\n"); for(int i=0; i<matrice_size; ++i){ for(int j=0; j<matrice_size; ++j){ printf("%5d ", c[i * matrice_size + j]); } printf("\n"); } printf("-------------\n"); } elapsed_time +=MPI_Wtime(); //if(!id){ //printf("%10.6f\n", elapsed_time); //} MPI_Finalize(); return 0; }
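// Editor's note: a minimal sketch (not part of d3.c) of the row-partition
// bookkeeping that rank 0 builds above: every rank gets n/p rows, the first
// n%p ranks take one extra row, and rcvcounts/displs are expressed in matrix
// elements (rows * n), which is what MPI_Gatherv expects here.
void partition_rows(int n, int p, int* rcvcounts, int* displs) {
    int extra = n % p, offset = 0;
    for (int i = 0; i < p; ++i) {
        int rows = n / p + (i < extra ? 1 : 0); // spread the remainder rows
        rcvcounts[i] = rows * n;                // counts are in elements
        displs[i] = offset;                     // element offset of this block
        offset += rcvcounts[i];
    }
}
// Example: n = 10, p = 4 gives rows {3,3,2,2}, rcvcounts = {30,30,20,20},
// displs = {0,30,60,80}.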
mandelbrot.c
/* To compile: gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp Or just type: module load gcc make To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads): ./mandelbrot 4096 4096 1 */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "png_util.h" // Q2a: add include for OpenMP header file here: #include <omp.h> #define MXITER 1000 typedef struct { double r; double i; }complex_t; // return iterations before z leaves mandelbrot set for given c int testpoint(complex_t c){ int iter; complex_t z; double temp; z = c; for(iter=0; iter<MXITER; iter++){ temp = (z.r*z.r) - (z.i*z.i) + c.r; z.i = z.r*z.i*2. + c.i; z.r = temp; if((z.r*z.r+z.i*z.i)>4.0){ return iter; } } return iter; } // perform Mandelbrot iteration on a grid of numbers in the complex plane // record the iteration counts in the count array void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){ int n,m; complex_t c; double dr = (cmax.r-cmin.r)/(Nre-1); double di = (cmax.i-cmin.i)/(Nim-1); // Q2c: add a compiler directive to split the outer for loop amongst threads here // (c must be private as well: it holds per-iteration coordinates, and leaving it shared is a data race) #pragma omp parallel for private(n,m,c) for(n=0;n<Nim;++n){ for(m=0;m<Nre;++m){ c.r = cmin.r + dr*m; c.i = cmin.i + di*n; count[m+n*Nre] = testpoint(c); } } } int main(int argc, char **argv){ // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ] // usage: ./mandelbrot 4096 4096 1 int Nre = atoi(argv[1]); int Nim = atoi(argv[2]); int Nthreads = atoi(argv[3]); // Q2b: set the number of OpenMP threads to be Nthreads here: omp_set_num_threads(Nthreads); // storage for the iteration counts float *count = (float*) malloc(Nre*Nim*sizeof(float)); // Parameters for a bounding box for "c" that generates an interesting image const float centRe = -.759856, centIm= .125547; const float diam = 0.151579; complex_t cmin; complex_t cmax; cmin.r = centRe - 0.5*diam; cmax.r = centRe + 0.5*diam; cmin.i = centIm - 0.5*diam; cmax.i = centIm + 0.5*diam; // Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time // start run time double start = omp_get_wtime(); // compute mandelbrot set mandelbrot(Nre, Nim, cmin, cmax, count); // Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time double end = omp_get_wtime(); // print elapsed time printf("elapsed = %g\n", end-start); // output mandelbrot to png format image FILE *fp = fopen("mandelbrot.png", "w"); write_hot_png(fp, Nre, Nim, count, 0, 80); exit(0); return 0; }
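// Editor's note, not part of the original exercise file: points near the set
// boundary run the full MXITER iterations while exterior points bail out
// early, so rows have very uneven cost. If load balance matters, a dynamic
// schedule on the same loop usually helps; the chunk size 8 is only a guess
// to tune per machine:
//
//   #pragma omp parallel for private(n,m,c) schedule(dynamic, 8)
//   for(n=0;n<Nim;++n){ ... }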
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2; /* fixed: a stray malloc(sizeof(double**)) here was immediately overwritten by the allocation below and leaked */ A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-12,16),ceild(4*t2-Nz-19,32));t3<=min(min(floord(4*Nt+Ny-9,32),floord(2*t1+Ny-3,32)),floord(4*t2+Ny-9,32));t3++) { for (t4=max(max(ceild(t1-252,256),ceild(4*t2-Nz-499,512)),ceild(32*t3-Ny-499,512));t4<=min(min(min(floord(4*Nt+Nx-9,512),floord(2*t1+Nx-3,512)),floord(4*t2+Nx-9,512)),floord(32*t3+Nx+19,512));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(512*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
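// Editor's note: orientation only, not generated code. tile_size = {4,4,32,512}
// above sets the PLUTO tile extents for the (time, z, y, x) dimensions; the
// CLooG-emitted t1..t8 loops then sweep those skewed tiles so that the
// "#pragma omp parallel for" over t2 is safe. Stripped of the tiling, one
// time step of the same 25-point update (radius 4 along each axis) reads:
//
//   for (i = 4; i < Nz-4; i++)
//     for (j = 4; j < Ny-4; j++)
//       for (k = 4; k < Nx-4; k++)
//         A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k]
//           + roc2[i][j][k] * (coef0*A[t%2][i][j][k]
//             + coef1*(sum of the 6 neighbours at offset 1)
//             + coef2*(same sum at offset 2)
//             + coef3*(same sum at offset 3)
//             + coef4*(same sum at offset 4));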
10_data-env4.c
#include <stdio.h> #include <omp.h> #include <stdlib.h> int main(int argc, char** argv) { int x = 100; omp_set_num_threads(20); #pragma omp parallel firstprivate(x) { printf("I am thread %d, my value of x is %d\n", omp_get_thread_num(), x); x = omp_get_thread_num(); } printf("X:%d\n", x); return 0; }
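// Editor's note on what the example above demonstrates. firstprivate(x) gives
// each of the 20 threads a private copy of x *initialized* to 100, so every
// thread prints 100; the assignments inside the region touch only the private
// copies, and the final printf still sees the original x == 100. With plain
// private(x) the copies would start uninitialized, and with a shared x the
// writes would race.
//
//   // lastprivate, by contrast, copies the value of the logically last
//   // iteration back to the original variable after the loop:
//   #pragma omp parallel for lastprivate(x)
//   for (int i = 0; i < 20; i++) x = i;   // afterwards x == 19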
GB_unaryop__minv_uint32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint32_fp32 // op(A') function: GB_tran__minv_uint32_fp32 // C type: uint32_t // A type: float // cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32) // unaryop: cij = GB_IMINV_UNSIGNED (aij, 32) #define GB_ATYPE \ float #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 32) ; // casting #define GB_CASTING(z, x) \ uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint32_fp32 ( uint32_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
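// Editor's note: a hand-expanded sketch (not generated code) of what the
// macro stack above produces inside GB_unop__minv_uint32_fp32's parallel
// loop: load a float, typecast it to uint32_t, then apply the GraphBLAS
// integer multiplicative inverse, whose result for x == 0 is defined by the
// GB_IMINV_UNSIGNED macro rather than trapping.
//
//   for (int64_t p = 0 ; p < anz ; p++)
//   {
//       float aij = Ax [p] ;                          // GB_GETA
//       uint32_t x ; GB_CAST_UNSIGNED (x, aij, 32) ;  // GB_CASTING
//       GB_CX (p) = GB_IMINV_UNSIGNED (x, 32) ;       // GB_OP, i.e. Cx [p]
//   }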
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. 
/// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>; using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. /// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader())) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the statement are expanded from different /// appearances of the macro. AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, MacroName) { // Verifies that the statement's beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Example matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. /// /// Example matches the declaration node with \c foo and \c bar, but not /// \c number. /// (matcher = declStmt(has(decompositionDecl()))) /// /// \code /// int number = 42; /// auto [foo, bar] = std::make_pair(42, 42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl> decompositionDecl; /// Matches binding declarations. /// Example matches \c foo and \c bar /// (matcher = bindingDecl()) /// /// \code /// auto [foo, bar] = std::make_pair(42, 42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl> bindingDecl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name.
/// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. 
/// /// Example matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template arguments (with location info). /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgumentLoc() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches template template parameter declarations. /// /// Given /// \code /// template <template <typename> class Z, int N> struct C {}; /// \endcode /// templateTemplateParmDecl() /// matches 'Z', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTemplateParmDecl> templateTemplateParmDecl; /// Matches public C++ declarations and C++ base specifiers that specify public /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; // fieldDecl(isPublic()) matches 'a' /// protected: int b; /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived1 : public Base {}; // matches 'Base' /// struct Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPublic, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_public; } /// Matches protected C++ declarations and C++ base specifiers that specify /// protected inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; // fieldDecl(isProtected()) matches 'b' /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived : protected Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isProtected, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_protected; } /// Matches private C++ declarations and C++ base specifiers that specify private /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
///   class C {
///     int a : 2;
///     int b;
///   };
/// \endcode
/// fieldDecl(isBitField())
///   matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  return Node.isBitField();
}

/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
///   class C {
///     int a : 2;
///     int b : 4;
///     int c : 2;
///   };
/// \endcode
/// fieldDecl(hasBitWidth(2))
///   matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  return Node.isBitField() &&
         Node.getBitWidthValue(Finder->getASTContext()) == Width;
}

/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
///   class C {
///     int a = 2;
///     int b = 3;
///     int c;
///   };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
///   matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
///   matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *Initializer = Node.getInClassInitializer();
  return (Initializer != nullptr &&
          InnerMatcher.matches(*Initializer, Finder, Builder));
}

/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  return Node.isMain();
}

/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
///   template<typename T> class A {}; #1
///   template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
///   matches '#2' with classTemplateDecl() matching the class template
///   declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  const ClassTemplateDecl* Decl = Node.getSpecializedTemplate();
  return (Decl != nullptr &&
          InnerMatcher.matches(*Decl, Finder, Builder));
}

/// Matches a declaration that has been implicitly added
/// by the compiler (e.g. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  return Node.isImplicit();
}

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder) != List.end();
}

/// Causes all nested matchers to be matched with the specified traversal kind.
/// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreUnlessSpelledInSource, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename P1> class MatcherT, typename P1, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1< MatcherT, P1, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>( TK, InnerMatcher); } template <template <typename T, typename P1, typename P2> class MatcherT, typename P1, typename P2, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2< MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>( TK, InnerMatcher); } template <typename... T> internal::Matcher<typename internal::GetClade<T...>::Type> traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) { return traverse(TK, InnerMatcher.with()); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. 
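///
/// As a usage sketch (illustrative, not part of the upstream docs): matchers
/// like this one are typically registered with a
/// \c clang::ast_matchers::MatchFinder; the callback type \c MyCallback below
/// is a hypothetical \c MatchFinder::MatchCallback subclass.
/// \code
///   using namespace clang::ast_matchers;
///   MatchFinder Finder;
///   MyCallback CB; // hypothetical MatchFinder::MatchCallback subclass
///   Finder.addMatcher(
///       varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
///           .bind("var"),
///       &CB);
/// \endcode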
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. 
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}

/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  return Node.isTypeDependent();
}

/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
///   template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  return Node.isValueDependent();
}

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T, typename U> class A {};
///   A<bool, int> b;
///   A<int, bool> c;
///
///   template<typename T> void f() {}
///   void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
///     1, refersToType(asString("int"))))
///   matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  if (List.size() <= N)
    return false;
  return InnerMatcher.matches(List[N], Finder, Builder);
}

/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
///   template<typename T> struct C {};
///   C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
///   matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  return internal::getTemplateSpecializationArgs(Node).size() == N;
}

/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
///   struct X {};
///   template<typename T> struct A {};
///   A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(class(hasName("X")))))
///   matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Type)
    return false;
  return InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}

/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
///   template<template <typename> class S> class X {};
///   template<typename T> class Y {};
///   X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToTemplate(templateName())))
///   matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Template)
    return false;
  return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}

/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
///   struct B { int next; };
///   template<int(B::*next_ptr)> struct A {};
///   A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToDeclaration(fieldDecl(hasName("next")))))
///   matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
///     \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  if (Node.getKind() == TemplateArgument::Declaration)
    return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
  return false;
}

/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
///   struct B { int next; };
///   template<int(B::*next_ptr)> struct A {};
///   A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
///   isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
///   matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
///     \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  if (Node.getKind() == TemplateArgument::Expression)
    return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
  return false;
}

/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(isIntegral()))
///   matches the implicit instantiation of C in C<42>
///   with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  return Node.getKind() == TemplateArgument::Integral;
}

/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}

/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(equalsIntegralValue("42")))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  return Node.getAsIntegral().toString(10) == Value;
}

/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
///   @autoreleasepool {
///     int x = 0;
///   }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
       ObjCAutoreleasePoolStmt> autoreleasePoolStmt;

/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
///   enum X { A, B, C };
///   void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;

/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
///   class Foo {
///    public:
///     Foo();
///     Foo(int);
///     int DoSomething();
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
    cxxConstructorDecl;

/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
///   class Foo {
///    public:
///     virtual ~Foo();
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
    cxxDestructorDecl;

/// Matches enum declarations.
///
/// Example matches X
/// \code
///   enum X {
///     A, B, C
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;

/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
///   enum X {
///     A, B, C
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
    enumConstantDecl;

/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
///   class X;
///   template<class T> class Z {};
///   struct S {};
///   union U {};
///   enum E {
///     A, B, C
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;

/// Matches method declarations.
///
/// Example matches y
/// \code
///   class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
    cxxMethodDecl;

/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
///   class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
    cxxConversionDecl;

/// Matches user-defined and implicitly generated deduction guides.
///
/// Example matches the deduction guide.
/// \code
///   template<typename T>
///   class X { X(int); };
///   X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
    cxxDeductionGuideDecl;

/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
///   int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;

/// Matches field declarations.
///
/// Given
/// \code
///   class X { int m; };
/// \endcode
/// fieldDecl()
///   matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;

/// Matches indirect field declarations.
///
/// Given
/// \code
///   struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
///   matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
    indirectFieldDecl;

/// Matches function declarations.
///
/// Example matches f
/// \code
///   void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
    functionDecl;

/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
///   template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
    functionTemplateDecl;

/// Matches friend declarations.
///
/// Given
/// \code
///   class X { friend void foo(); };
/// \endcode
/// friendDecl()
///   matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;

/// Matches statements.
///
/// Given
/// \code
///   { ++a; }
/// \endcode
/// stmt()
///   matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;

/// Matches declaration statements.
///
/// Given
/// \code
///   int a;
/// \endcode
/// declStmt()
///   matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;

/// Matches member expressions.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     int a; static int b;
///   };
/// \endcode
/// memberExpr()
///   matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;

/// Matches unresolved member expressions.
///
/// Given
/// \code
///   struct X {
///     template <class T> void f();
///     void g();
///   };
///   template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
///   matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
    unresolvedMemberExpr;

/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
///   template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
///   matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXDependentScopeMemberExpr>
    cxxDependentScopeMemberExpr;

/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
///   X x;
///   x.y();
///   y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;

/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
///   namespace NS {
///     struct X {};
///     void y(X);
///   }
///
///   void y(...);
///
///   void test() {
///     NS::X x;
///     y(x); // Matches
///     NS::y(x); // Doesn't match
///     y(42); // Doesn't match
///     using NS::y;
///     y(x); // Found by both unqualified lookup and ADL, doesn't match
///   }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }

/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
///   [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;

/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
///   X x;
///   x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
    cxxMemberCallExpr;

/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
///   [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
    objcMessageExpr;

/// Matches Objective-C interface declarations.
/// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. /// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. /// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. 
///
/// Given
/// \code
///   int a[] = { 1, 2 };
///   struct B { int x, y; };
///   B b = { 5, 6 };
/// \endcode
/// initListExpr()
///   matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
    initListExpr;

/// Matches the syntactic form of init list expressions
/// (if the expression has one).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *SyntForm = Node.getSyntacticForm();
  return (SyntForm != nullptr &&
          InnerMatcher.matches(*SyntForm, Finder, Builder));
}

/// Matches C++ initializer list expressions.
///
/// Given
/// \code
///   std::vector<int> a({ 1, 2, 3 });
///   std::vector<int> b = { 4, 5 };
///   int c[] = { 6, 7 };
///   std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
///   matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXStdInitializerListExpr>
    cxxStdInitializerListExpr;

/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
///   matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
    implicitValueInitExpr;

/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be found in template declarations.
///
/// Given
/// \code
///   template<typename T> class X {
///     void f() {
///       X x(*this);
///       int a = 0, b = 1; int i = (a, b);
///     }
///   };
/// \endcode
/// parenListExpr() matches "*this" but does NOT match (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
    parenListExpr;

/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
///   template <int N>
///   struct A { static const int n = N; };
///   struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
///   matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   SubstNonTypeTemplateParmExpr>
    substNonTypeTemplateParmExpr;

/// Matches using declarations.
///
/// Given
/// \code
///   namespace X { int x; }
///   using X::x;
/// \endcode
/// usingDecl()
///   matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;

/// Matches using namespace declarations.
///
/// Given
/// \code
///   namespace X { int x; }
///   using namespace X;
/// \endcode
/// usingDirectiveDecl()
///   matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
    usingDirectiveDecl;

/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
///   template<typename T>
///   T foo() { T a; return a; }
///   template<typename T>
///   void bar() {
///     foo<T>();
///   }
/// \endcode
/// unresolvedLookupExpr()
///   matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
    unresolvedLookupExpr;

/// Matches unresolved using value declarations.
/// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. /// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. 
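///
/// As an illustrative sketch (not upstream documentation), the matcher
/// composes with other matchers like any node matcher; for example, binding
/// the deleted operand:
/// \code
///   cxxDeleteExpr(hasDescendant(declRefExpr().bind("deleted")))
/// \endcode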
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. /// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode /// See also the binaryOperation() matcher for more-general matching of binary /// uses of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches rewritten binary operators /// /// Example matches use of "<": /// \code /// #include <compare> /// struct HasSpaceshipMem { /// int a; /// constexpr auto operator<=>(const HasSpaceshipMem&) const = default; /// }; /// void compare() { /// HasSpaceshipMem hs1, hs2; /// if (hs1 < hs2) /// return; /// } /// \endcode /// See also the binaryOperation() matcher for more-general matching /// of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXRewrittenBinaryOperator> cxxRewrittenBinaryOperator; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. /// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. 
///
/// Example matches 'for (;;) {}'
/// \code
///   for (;;) {}
///   int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;

/// Matches the increment statement of a for loop.
///
/// Example:
///     forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
///     for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Stmt *const Increment = Node.getInc();
  return (Increment != nullptr &&
          InnerMatcher.matches(*Increment, Finder, Builder));
}

/// Matches the initialization statement of a for loop.
///
/// Example:
///     forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
///     for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Stmt *const Init = Node.getInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}

/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
///   int i[] = {1, 2, 3}; for (auto a : i);
///   for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
    cxxForRangeStmt;

/// Matches the loop variable of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  const VarDecl *const Var = Node.getLoopVariable();
  return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}

/// Matches the range initialization expression of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *const Init = Node.getRangeInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}

/// Matches while statements.
///
/// Given
/// \code
///   while (true) {}
/// \endcode
/// whileStmt()
///   matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;

/// Matches do statements.
///
/// Given
/// \code
///   do {} while (true);
/// \endcode
/// doStmt()
///   matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;

/// Matches break statements.
///
/// Given
/// \code
///   while (true) { break; }
/// \endcode
/// breakStmt()
///   matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;

/// Matches continue statements.
///
/// Given
/// \code
///   while (true) { continue; }
/// \endcode
/// continueStmt()
///   matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
    continueStmt;

/// Matches return statements.
///
/// Given
/// \code
///   return 1;
/// \endcode
/// returnStmt()
///   matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;

/// Matches goto statements.
///
/// Given
/// \code
///   goto FOO;
///   FOO: bar();
/// \endcode
/// gotoStmt()
///   matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;

/// Matches label statements.
///
/// Given
/// \code
///   goto FOO;
///   FOO: bar();
/// \endcode
/// labelStmt()
///   matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;

/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
///   FOO: bar();
///   void *ptr = &&FOO;
///   goto *ptr;
/// \endcode
/// addrLabelExpr()
///   matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
    addrLabelExpr;

/// Matches switch statements.
///
/// Given
/// \code
///   switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
///   matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;

/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
///   switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
///   matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;

/// Matches case statements inside switch statements.
///
/// Given
/// \code
///   switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
///   matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;

/// Matches default statements inside switch statements.
///
/// Given
/// \code
///   switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
///   matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
    defaultStmt;

/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
///   for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
    compoundStmt;

/// Matches catch statements.
///
/// \code
///   try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
///   matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
    cxxCatchStmt;

/// Matches try statements.
///
/// \code
///   try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
///   matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;

/// Matches throw expressions.
///
/// \code
///   try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
///   matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
    cxxThrowExpr;

/// Matches null statements.
///
/// \code
///   foo();;
/// \endcode
/// nullStmt()
///   matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;

/// Matches asm statements.
///
/// \code
///   int i = 100;
///   __asm("mov al, 2");
/// \endcode
/// asmStmt()
///   matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;

/// Matches bool literals.
///
/// Example matches true
/// \code
///   true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
    cxxBoolLiteral;

/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
///   char *s = "abcd";
///   wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
    stringLiteral;

/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is an IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
///   char ch = 'a';
///   wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
    characterLiteral;

/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
    integerLiteral;

/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
///   float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
    floatLiteral;

/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
    imaginaryLiteral;

/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
    fixedPointLiteral;

/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
    userDefinedLiteral;

/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
///   int array[4] = {1};
///   vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
    compoundLiteralExpr;

/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
    cxxNullPtrLiteralExpr;

/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
    chooseExpr;

/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
    gnuNullExpr;

/// Matches C11 _Generic expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr>
    genericSelectionExpr;

/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
///   void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;

/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
///   int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;

/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
///   !(a || b)
/// \endcode
/// See also the binaryOperation() matcher for more-general matching.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
    binaryOperator;

/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
///   !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
    unaryOperator;

/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
///   (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
    conditionalOperator;

/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
///   (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   BinaryConditionalOperator>
    binaryConditionalOperator;

/// Matches opaque value expressions. They are used as helpers
/// to reference other expressions and can be found
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
///   (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
    opaqueValueExpr;

/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
///   static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
///   struct S {
///     int x;
///   };
///   static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
    staticAssertDecl;

/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
///   void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
    cxxReinterpretCastExpr;

/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
///   cxxStaticCastExpr()
/// matches
///   static_cast<long>(8)
/// in
/// \code
///   long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
    cxxStaticCastExpr;

/// Matches a dynamic_cast expression.
///
/// Example:
///   cxxDynamicCastExpr()
/// matches
///   dynamic_cast<D*>(&b);
/// in
/// \code
///   struct B { virtual ~B() {} }; struct D : B {};
///   B b;
///   D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
    cxxDynamicCastExpr;

/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
///   int n = 42;
///   const int &r(n);
///   int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
    cxxConstCastExpr;

/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
///   int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
    cStyleCastExpr;

/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
///   int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
///   long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
    explicitCastExpr;

/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
    implicitCastExpr;

/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
///   (int) 3;
///   const_cast<Expr *>(SubExpr);
///   char c = 0;
/// \endcode
/// but does not match
/// \code
///   int i = (0);
///   int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;

/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
///   Foo f = bar;
///   Foo g = (Foo) bar;
///   Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
    cxxFunctionalCastExpr;

/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
///   Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
    cxxTemporaryObjectExpr;

/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
///   printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
    predefinedExpr;

/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
    designatedInitExpr;

/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
///   point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
///   matches '{ [2].y = 1.0, [0].x = 1.0 }',
///   but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  return Node.size() == N;
}

/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;

/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;

/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;

/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
///   class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
///                        has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    eachOf;

/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    anyOf;

/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    allOf;

/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information that may or may not be present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
///   class Foo {
///     int bar;
///   };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(
///     optionally(has(
///       fieldDecl(hasName("bar")).bind("var")
///   ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;

/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
///   Foo x = bar;
///   int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
///   matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   UnaryExprOrTypeTraitExpr>
    unaryExprOrTypeTraitExpr;

/// Matches any of the \p NodeMatchers with InnerMatchers nested within
///
/// Given
/// \code
///   if (true);
///   for (; true; );
/// \endcode
/// with the matcher
/// \code
///   mapAnyOf(ifStmt, forStmt).with(
///     hasCondition(cxxBoolLiteralExpr(equals(true)))
///   ).bind("trueCond")
/// \endcode
/// matches the \c if and the \c for. It is equivalent to:
/// \code
///   auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true)));
///   anyOf(
///     ifStmt(trueCond).bind("trueCond"),
///     forStmt(trueCond).bind("trueCond")
///   );
/// \endcode
///
/// The with() chain-call accepts zero or more matchers which are combined
/// as-if with allOf() in each of the node matchers.
///
/// Usable as: Any Matcher
template <typename T, typename... U>
auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) {
  return internal::MapAnyOfHelper<U...>();
}

/// Matches nodes which can be used with binary operators.
///
/// The code
/// \code
///   var1 != var2;
/// \endcode
/// might be represented in the clang AST as a binaryOperator, a
/// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on
///
/// * whether the types of var1 and var2 are fundamental (binaryOperator) or at
///   least one is a class type (cxxOperatorCallExpr)
/// * whether the code appears in a template declaration, if at least one of the
///   vars is a dependent-type (binaryOperator)
/// * whether the code relies on a rewritten binary operator, such as a
///   spaceship operator or an inverted equality operator
///   (cxxRewrittenBinaryOperator)
///
/// This matcher elides details in places where the matchers for the nodes are
/// compatible.
///
/// Given
/// \code
///   binaryOperation(
///     hasOperatorName("!="),
///     hasLHS(expr().bind("lhs")),
///     hasRHS(expr().bind("rhs"))
///   )
/// \endcode
/// matches each use of "!=" in:
/// \code
///   struct S{
///     bool operator!=(const S&) const;
///   };
///
///   void foo()
///   {
///     1 != 2;
///     S() != S();
///   }
///
///   template<typename T>
///   void templ()
///   {
///     1 != 2;
///     T() != S();
///   }
///   struct HasOpEq
///   {
///     bool operator==(const HasOpEq &) const;
///   };
///
///   void inverse()
///   {
///     HasOpEq s1;
///     HasOpEq s2;
///     if (s1 != s2)
///       return;
///   }
///
///   struct HasSpaceship
///   {
///     bool operator<=>(const HasSpaceship &) const;
///   };
///
///   void use_spaceship()
///   {
///     HasSpaceship s1;
///     HasSpaceship s2;
///     if (s1 != s2)
///       return;
///   }
/// \endcode
extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr,
                                       CXXRewrittenBinaryOperator>
    binaryOperation;

/// Matches function calls and constructor calls
///
/// Because CallExpr and CXXConstructExpr do not share a common
/// base class with API accessing arguments etc., AST Matchers for code
/// which should match both are typically duplicated. This matcher
/// removes the need for duplication.
///
/// Given code
/// \code
///   struct ConstructorTakesInt
///   {
///     ConstructorTakesInt(int i) {}
///   };
///
///   void callTakesInt(int i)
///   {
///   }
///
///   void doCall()
///   {
///     callTakesInt(42);
///   }
///
///   void doConstruct()
///   {
///     ConstructorTakesInt cti(42);
///   }
/// \endcode
///
/// The matcher
/// \code
///   invocation(hasArgument(0, integerLiteral(equals(42))))
/// \endcode
/// matches the expression in both doCall and doConstruct.
extern const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation;

/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType ArgumentType = Node.getTypeOfArgument();
  return InnerMatcher.matches(ArgumentType, Finder, Builder);
}

/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, the UnaryExprOrTypeTrait
/// parameter should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
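///
/// A leading "::" anchors the name at the global scope, as in this sketch:
/// \code
///   namedDecl(hasName("::std::string"))
/// \endcode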
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///   hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
///   anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>,
                                        StringRef, internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullNameString);
}

/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<")) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
      {std::string(Name)});
}

/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
///   Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
    StringRef, internal::hasAnyOverloadedOperatorNameFunc>
    hasAnyOverloadedOperatorName;

/// Matches template-dependent, but known, member names.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows matching on the known name of members.
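///
/// (For non-dependent member accesses, a sketch of the usual spelling is
/// \c memberExpr(member(hasName("mem"))); this matcher covers the dependent
/// case shown below.)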
///
/// Given
/// \code
///   template <typename T>
///   struct S {
///     void mem();
///   };
///   template <typename T>
///   void x() {
///     S<T> s;
///     s.mem();
///   }
/// \endcode
/// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()`
AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) {
  return Node.getMember().getAsString() == N;
}

/// Matches template-dependent, but known, member names against an
/// already-bound node.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows matching on the name of already-bound VarDecl,
/// FieldDecl and CXXMethodDecl nodes.
///
/// Given
/// \code
///   template <typename T>
///   struct S {
///     void mem();
///   };
///   template <typename T>
///   void x() {
///     S<T> s;
///     s.mem();
///   }
/// \endcode
/// The matcher
/// \code
///   cxxDependentScopeMemberExpr(
///     hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
///         hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has(
///             cxxMethodDecl(hasName("mem")).bind("templMem")
///             )))))
///         )))),
///     memberHasSameNameAsBoundNode("templMem")
///   )
/// \endcode
/// first matches and binds the \c mem member of the \c S template, then
/// compares its name to the usage in \c s.mem() in the \c x function
/// template.
AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode,
              std::string, BindingID) {
  auto MemberName = Node.getMember().getAsString();

  return Builder->removeBindings(
      [this, MemberName](const BoundNodesMap &Nodes) {
        const auto &BN = Nodes.getNode(this->BindingID);
        if (const auto *ND = BN.get<NamedDecl>()) {
          if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND))
            return true;
          return ND->getName() != MemberName;
        }
        return true;
      });
}

/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
///   @interface NSObject @end
///   @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);

  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/false);
}

/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
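/// For instance, the following two sketches are equivalent ("MyBase" is a
/// placeholder name):
/// \code
///   cxxRecordDecl(isDerivedFrom("MyBase"))
///   cxxRecordDecl(isDerivedFrom(hasName("MyBase")))
/// \endcode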
AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. /// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isSameOrDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ or Objective-C classes that are directly derived from a class /// matching \c Base. /// /// Note that a class is not considered to be derived from itself. 
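///
/// A contrasting sketch:
/// \code
///   cxxRecordDecl(isDirectlyDerivedFrom(hasName("X")))  // direct bases only
///   cxxRecordDecl(isDerivedFrom(hasName("X")))          // direct or indirect
/// \endcode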
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);

  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}

/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;
  const auto M = isDirectlyDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder,
                                               Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  BoundNodesTreeBuilder Result(*Builder);
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                            Node.method_end(), Finder, &Result);
  if (MatchIt == Node.method_end())
    return false;

  if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit())
    return false;
  *Builder = std::move(Result);
  return true;
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is a direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;

/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
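///
/// A node does not count as its own descendant; \c findAll, sketched further
/// below, is one way to include the node itself:
/// \code
///   findAll(cxxRecordDecl(hasName("X")))
/// \endcode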
///
/// Example matches X, Y, Z
///   (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasDescendantMatcher>
    hasDescendant;

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
///   (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   class Y { class X {}; };  // Matches Y, because Y::X is a class of name X
///                             // inside Y.
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
    forEach;

/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
///   (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   class A { class X {}; };  // Matches A, because A::X is a class of name
///                             // X inside A.
///   class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
///   cxxRecordDecl(forEachDescendant(cxxRecordDecl(
///     forEachDescendant(cxxRecordDecl())
///   )))
/// will match 10 times (plus injected class name matches) on:
/// \code
///   class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::ForEachDescendantMatcher>
    forEachDescendant;

/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
///   class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(hasName("::A"),
///                 findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  return eachOf(Matcher, forEachDescendant(Matcher));
}

/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
///   void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasParentMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasParent;

/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
///   void f() { if (true) { int x = 42; } }
///   void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
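/// A related sketch: ancestry crosses statement/declaration boundaries, so
/// \code
///   varDecl(hasAncestor(functionDecl(hasName("f"))))
/// \endcode
/// matches local variables declared anywhere inside \c f.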
/// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. /// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. 
/// cxxMemberCallExpr(on(callExpr()))
///   matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument()
      ->IgnoreParenImpCasts();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objcMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
///   NSString *webViewJavaScript = ...
///   UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}

/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
///   @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
///   @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  return Node.isClassMethod();
}

/// Returns true when the Objective-C method declaration is an instance
/// method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
///   @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
///   @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  return Node.isInstanceMethod();
}

/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  return Node.isClassMessage();
}

/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// but not
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  return Node.isInstanceMessage();
}

/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *ReceiverNode = Node.getInstanceReceiver();
  return (ReceiverNode != nullptr &&
          InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder,
                               Builder));
}

/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objcMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  Selector Sel = Node.getSelector();
  return BaseName.compare(Sel.getAsString()) == 0;
}

/// Matches when at least one of the supplied strings equals the
/// Selector.getAsString()
///
/// matcher = objcMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
///   [myObj methodA:argA];
///   [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;

/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
///
/// matcher = objcMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  std::string SelectorString = Node.getSelector().getAsString();
  return RegExp->match(SelectorString);
}

/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objcMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  return Node.getSelector().isNull();
}

/// Matches when the selector is a Unary Selector
///
/// matcher = objcMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  return Node.getSelector().isUnarySelector();
}

/// Matches when the selector is a keyword selector
///
/// objcMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
///   UIWebView *webView = ...;
///   CGRect bodyFrame = webView.frame;
///   bodyFrame.size.height = self.bodyContentHeight;
///   webView.frame = bodyFrame;
///   //     ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  return Node.getSelector().isKeywordSelector();
}

/// Matches when the selector has the specified number of arguments
///
/// matcher = objcMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objcMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  return Node.getSelector().getNumArgs() == N;
}

/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
///   class Y { void x() { this->x(); x(); Y y; y.x(); } };
///   void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
///   matches this->x(), x(), y.x(), f()
/// with callee(...)
///   matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking an
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
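///
/// The two overloads are used as in this sketch:
/// \code
///   callExpr(callee(expr()))                       // by callee expression
///   callExpr(callee(functionDecl(hasName("f"))))   // by callee declaration
/// \endcode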
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Expr *ExprNode = Node.getCallee();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
///                                    cxxMethodDecl(hasName("x")))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}

/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and U (matcher = typedefDecl(hasType(asString("int"))))
///             and friend class X (matcher = friendDecl(hasType("X")))
/// \code
///   class X {};
///   void y(X &x) { x; X z; }
///   typedef int U;
///   class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  QualType QT = internal::getUnderlyingType(Node);
  if (!QT.isNull())
    return InnerMatcher.matches(QT, Finder, Builder);
  return false;
}

/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and friend class X (matcher = friendDecl(hasType("X")))
/// \code
///   class X {};
///   void y(X &x) { x; X z; }
///   class Y { friend class X; };
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
///   class Base {};
///   class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
                                    CXXBaseSpecifier),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  QualType QT = internal::getUnderlyingType(Node);
  if (!QT.isNull())
    return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
  return false;
}

/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
///   int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
///   matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  if (!Node.getTypeSourceInfo())
    // This happens for example for implicit destructors.
    return false;
  return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder,
                       Builder);
}

/// Matches if the matched type is represented by the given string.
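///
/// The comparison is an exact match against \c QualType::getAsString, so
/// qualifiers and spelling matter; a sketch:
/// \code
///   varDecl(hasType(asString("const int")))  // 'const int x;' but not 'int x;'
/// \endcode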
///
/// Given
/// \code
///   class Y { public: void x(); };
///   void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
///   matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  return Name == Node.getAsString();
}

/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
///   (matcher = cxxMemberCallExpr(on(hasType(pointsTo(
///      cxxRecordDecl(hasName("Y")))))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  return (!Node.isNull() && Node->isAnyPointerType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
///   class A {};
///   using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}

/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
///     (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
///   class X {
///     void a(X b) {
///       X &x = b;
///       const X &y = b;
///     }
///   };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  return (!Node.isNull() && Node->isReferenceType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
///   typedef int &int_ref;
///   int a;
///   int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression.
/// Unlike `on`, matches the argument directly without stripping away
/// anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and `(g()).m()`, but not `x.g()`.
/// cxxMemberCallExpr(onImplicitObjectArgument(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
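///
/// To accept both value and pointer receivers, a sketch is:
/// \code
///   cxxMemberCallExpr(onImplicitObjectArgument(
///       anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))))
/// \endcode
/// which is essentially how \c thisPointerType below is implemented.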
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the type of the expression's implicit object argument either /// matches the InnerMatcher, or is a pointer to a type that matches the /// InnerMatcher. /// /// Given /// \code /// class Y { public: void m(); }; /// class X : public Y { void g(); }; /// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); } /// \endcode /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `p->m()` and `x.m()`. /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("X"))))) /// matches `x.g()`. AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<QualType>, InnerMatcher, 0) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Overloaded to match the type's declaration. AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<Decl>, InnerMatcher, 1) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Matches a DeclRefExpr that refers to a declaration that matches the /// specified matcher. /// /// Example matches x in if(x) /// (matcher = declRefExpr(to(varDecl(hasName("x"))))) /// \code /// bool x; /// if (x) {} /// \endcode AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) { const Decl *DeclNode = Node.getDecl(); return (DeclNode != nullptr && InnerMatcher.matches(*DeclNode, Finder, Builder)); } /// Matches a \c DeclRefExpr that refers to a declaration through a /// specific using shadow declaration. /// /// Given /// \code /// namespace a { void f() {} } /// using a::f; /// void g() { /// f(); // Matches this .. /// a::f(); // .. but not this. /// } /// \endcode /// declRefExpr(throughUsingDecl(anything())) /// matches \c f() AST_MATCHER_P(DeclRefExpr, throughUsingDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { const NamedDecl *FoundDecl = Node.getFoundDecl(); if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl)) return InnerMatcher.matches(*UsingDecl, Finder, Builder); return false; } /// Matches an \c OverloadExpr if any of the declarations in the set of /// overloads matches the given matcher. /// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder) != Node.decls_end(); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. 
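///
/// A sketch of a combined use (the binding name is illustrative):
/// \code
///   declStmt(hasSingleDecl(varDecl(hasInitializer(expr())).bind("only-var")))
/// \endcode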
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  if (Node.isSingleDecl()) {
    const Decl *FoundDecl = Node.getSingleDecl();
    return InnerMatcher.matches(*FoundDecl, Finder, Builder);
  }
  return false;
}

/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
///   bool y() { return true; }
///   bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  const Expr *Initializer = Node.getAnyInitializer();
  return (Initializer != nullptr &&
          InnerMatcher.matches(*Initializer, Finder, Builder));
}

/// Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
///   void f() {
///     int x;
///     static int y;
///   }
///   static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  return Node.isStaticLocal();
}

/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage()))
/// \code
///   void f() {
///     int x;
///     static int y;
///   }
///   int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  return Node.hasLocalStorage();
}

/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage()))
/// \code
///   void f() {
///     int x;
///     static int y;
///   }
///   int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  return Node.hasGlobalStorage();
}

/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration()))
/// \code
///   void f() {
///     int x;
///     static int y;
///     thread_local int z;
///   }
///   int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  return Node.getStorageDuration() == SD_Automatic;
}

/// Matches a variable declaration that has static storage duration.
/// It includes variables declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
///   void f() {
///     int x;
///     static int y;
///     thread_local int z;
///   }
///   int a;
///   static int b;
///   extern int c;
/// \endcode
/// varDecl(hasStaticStorageDuration())
///   matches the variable declarations y, a, b and c.
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  return Node.getStorageDuration() == SD_Static;
}

/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration()))
/// \code
///   void f() {
///     int x;
///     static int y;
///     thread_local int z;
///   }
///   int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  return Node.getStorageDuration() == SD_Thread;
}

/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable()))
/// \code
///   void f(int y) {
///     try {
///     } catch (int x) {
///     }
///   }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  return Node.isExceptionVariable();
}

/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
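///
/// Under the default AsIs traversal a defaulted argument still counts, as in
/// this sketch:
/// \code
///   void g(int a, int b = 1);
///   g(0);  // callExpr(argumentCountIs(2)) matches under AsIs traversal
/// \endcode
/// With IgnoreUnlessSpelledInSource traversal, trailing defaulted arguments
/// are not counted, so argumentCountIs(1) would match instead.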
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
///   void f(int x, int y);
///   f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          unsigned, N) {
  unsigned NumArgs = Node.getNumArgs();
  if (!Finder->isTraversalIgnoringImplicitNodes())
    return NumArgs == N;
  while (NumArgs) {
    if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1)))
      break;
    --NumArgs;
  }
  return NumArgs == N;
}

/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
///     (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
///   void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(
                               CallExpr, CXXConstructExpr,
                               CXXUnresolvedConstructExpr, ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>,
                           InnerMatcher) {
  if (N >= Node.getNumArgs())
    return false;
  const Expr *Arg = Node.getArg(N);
  if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg))
    return false;
  return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder);
}

/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
///     (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  return N < Node.getNumInits() &&
         InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}

/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
///   int a, b;
///   int c;
///   int d = 2, e;
/// \endcode
/// declCountIs(2)
///   matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
///   matches 'int a, b = 0;' as well as 'int d = 2, e;',
///   but 'int c;' is not matched.
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  return Node.getExceptionDecl() == nullptr;
}

/// Matches a constructor initializer.
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); if (MatchIt == Node.init_end()) return false; return (*MatchIt)->isWritten() || !Finder->isTraversalIgnoringImplicitNodes(); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). /// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
///   matching y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
///   void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
///   matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    if (Finder->isTraversalIgnoringImplicitNodes() &&
        isa<CXXDefaultArgExpr>(Arg))
      break;
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}

/// Matches any capture of a lambda expression.
///
/// Given
/// \code
///   void foo() {
///     int x;
///     auto f = [x](){};
///   }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
///   matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &Capture : Node.captures()) {
    if (Capture.capturesVariable()) {
      BoundNodesTreeBuilder Result(*Builder);
      if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
        *Builder = std::move(Result);
        return true;
      }
    }
  }
  return false;
}

/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
///   struct foo {
///     void bar() {
///       auto f = [this](){};
///     }
///   };
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
///   matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
    return LC.capturesThis();
  });
}

/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  return Node.isListInitialization();
}

/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
///   void foo() {
///     struct point { double x; double y; };
///     point pt[2] = { { 1.0, 2.0 } };
///   }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}

/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
///   ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
///   void (*f_ptr)(int) = f;
///   f_ptr(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParamType(
///     declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
///   ))
///   matches f(y) and f_ptr(y)
/// with declRefExpr(...)
///   matching int y
/// and qualType(...)
///   matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;

  const FunctionProtoType *FProto = nullptr;

  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();

      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
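      // Below we unwrap plain function pointers and member-function pointers
      // to reach the FunctionProtoType that carries the parameter types. If
      // FProto remains null, the matcher-based fallback further down handles
      // the parameters instead.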
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();

      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be a member pointer if it is a member function "
                     "pointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }

  int ParamIndex = 0;
  bool Matched = false;

  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);

      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches the ParmVarDecl nodes that are at the N'th position in the
/// parameter list. The parameter list could be that of either a block, a
/// function, or an objc-method.
///
/// Given
/// \code
///   void f(int a, int b, int c) {
///   }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();

  if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;

  return false;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
///   class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
///   matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
///   matching int y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasAnyParameter
/// matching y.
///
/// For blocks, given
/// \code
///   b = ^(int y) { printf("%d", y); };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasAnyParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder,
                                    Builder) != Node.param_end();
}

/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
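///
/// Only named parameters are counted; a trailing C-style ellipsis does not
/// add to the count, as in this sketch:
/// \code
///   functionDecl(parameterCountIs(3))  // also matches k(int, int, int, ...) below
/// \endcode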
/// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. /// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches weak function declarations. /// /// Given: /// \code /// void foo() __attribute__((__weakref__("__foo"))); /// void bar(); /// \endcode /// functionDecl(isWeak()) /// matches the weak declaration "foo", but not "bar". AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); } /// Matches functions that have a dynamic exception specification. 
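// Editor's note: the functionDecl() narrowers above compose implicitly via
// allOf(). A hedged sketch (the binding name "f" is arbitrary):
//
//   functionDecl(isStaticStorageClass(),
//                parameterCountIs(2),
//                returns(asString("void"))).bind("f")
//
// matches only static, two-parameter functions returning void, since every
// sibling matcher must hold on the same declaration.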
/// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches selection statements with initializer. /// /// Given: /// \code /// void foo() { /// if (int i = foobar(); i > 0) {} /// switch (int i = foobar(); i) {} /// for (auto& a = get_range(); auto& x : a) {} /// } /// void bar() { /// if (foobar() > 0) {} /// switch (foobar()) {} /// for (auto& x : get_range()) {} /// } /// \endcode /// ifStmt(hasInitStatement(anything())) /// matches the if statement in foo but not in bar. /// switchStmt(hasInitStatement(anything())) /// matches the switch statement in foo but not in bar. /// cxxForRangeStmt(hasInitStatement(anything())) /// matches the range for statement in foo but not in bar. AST_POLYMORPHIC_MATCHER_P(hasInitStatement, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt, CXXForRangeStmt), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *Init = Node.getInit(); return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. 
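// Editor's note: two hedged sketches for the matchers above. To find C++17
// if-statements that carry an init-statement and call something in their
// condition:
//
//   ifStmt(hasInitStatement(declStmt()), hasCondition(callExpr()))
//
// and to flag functions still written with dynamic exception specifications:
//
//   functionDecl(hasDynamicExceptionSpec())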
///
/// Example matches the if statement
///   (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Then = Node.getThen();
  return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}

/// Matches the else-statement of an if statement.
///
/// Example matches the if statement
///   (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Else = Node.getElse();
  return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder));
}

/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
///   class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
///     has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
///     has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
///   matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNode acts as a filter.
/// For example:
/// compoundStmt(
///     forEachDescendant(varDecl().bind("d")),
///     forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}

/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
///   if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
///   matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  const DeclStmt* const DeclarationStatement =
      Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}

/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}
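// Editor's note: a worked example of the filtering behaviour of
// equalsBoundNode described above. Given `void f() { int x; x = 1; int y; }`,
//
//   compoundStmt(
//       forEachDescendant(varDecl().bind("d")),
//       forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
//
// yields a single match: the combination binding "d" to `x` paired with the
// reference to `x` in `x = 1`. The combination binding "d" to `y` is filtered
// out because no declRefExpr refers to `y`.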
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body. Note that in case of functions
/// this matcher only matches the definition itself and not the other
/// declarations of the same function.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
///
/// Given
/// \code
///   void f();
///   void f() {}
/// \endcode
/// functionDecl(hasBody(compoundStmt()))
///   matches 'void f() {}'
/// with compoundStmt()
///   matching '{}'
///   but does not match 'void f();'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node))
    return false;
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches a function declaration that has a given body present in the AST.
/// Note that this matcher matches all the declarations of a function whose
/// body is present in the AST.
///
/// Given
/// \code
///   void f();
///   void f() {}
///   void g();
/// \endcode
/// functionDecl(hasAnyBody(compoundStmt()))
///   matches both 'void f();'
///   and 'void f() {}'
/// with compoundStmt()
///   matching '{}'
///   but does not match 'void g();'
AST_MATCHER_P(FunctionDecl, hasAnyBody,
              internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Statement = Node.getBody();
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder,
                                          Builder) != CS->body_end();
}

/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
///   { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
///   matches '{}'
///   but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  return Node.size() == N;
}
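// Editor's note: the practical difference between hasBody and hasAnyBody on
// the redeclaration chain `void f(); void f() {}` documented above:
//
//   functionDecl(hasBody(compoundStmt()))     // only the definition
//   functionDecl(hasAnyBody(compoundStmt()))  // both declarations, because a
//                                             // body exists somewhere in the
//                                             // translation unit

/// Matches literals that are equal to the given value of type ValueT.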
/// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcherWithParam1< internal::ValueEqualsMatcher, ValueT>(Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). /// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P( hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator), std::string, Name) { if (Optional<StringRef> OpName = internal::getOpName(Node)) return *OpName == Name; return false; } /// Matches operator expressions (binary or unary) that have any of the /// specified names. /// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasAnyOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator)>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isAssignmentOp(); } /// Matches comparison operators. 
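// Editor's note: a hedged sketch combining the literal and operator matchers
// above, e.g. finding subtractions whose right operand is the literal 1:
//
//   binaryOperator(hasOperatorName("-"),
//                  hasRHS(integerLiteral(equals(1))))
//
// Per the caveat documented above, `-1` itself is matched as
//
//   unaryOperator(hasOperatorName("-"),
//                 hasUnaryOperand(integerLiteral(equals(1))))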
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
///   (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
///   struct S { bool operator<(const S& other); };
///   void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
    isComparisonOperator,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator)) {
  return Node.isComparisonOp();
}

/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *LeftHandSide = internal::getLHS(Node);
  return (LeftHandSide != nullptr &&
          InnerMatcher.matches(*LeftHandSide, Finder, Builder));
}

/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *RightHandSide = internal::getRHS(Node);
  return (RightHandSide != nullptr &&
          InnerMatcher.matches(*RightHandSide, Finder, Builder));
}

/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
AST_POLYMORPHIC_MATCHER_P(
    hasEitherOperand,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
///                                              integerLiteral(equals(2))))
/// \code
///   1 + 2 // Match
///   2 + 1 // Match
///   1 + 1 // No match
///   2 + 2 // No match
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
    hasOperands,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
                   allOf(hasLHS(Matcher2), hasRHS(Matcher1))))
      .matches(Node, Finder, Builder);
}

/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator,
                                                          CXXOperatorCallExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Operand = internal::getSubExpr(Node);
  return (Operand != nullptr &&
          InnerMatcher.matches(*Operand, Finder, Builder));
}
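// Editor's note: hasOperands is order-insensitive, unlike the hasLHS/hasRHS
// pair. A hedged sketch for comparisons against zero on either side:
//
//   binaryOperator(isComparisonOperator(),
//                  hasOperands(integerLiteral(equals(0)),
//                              expr().bind("other")))
//
// matches both `x == 0` and `0 == x`; the same constraint via hasLHS/hasRHS
// would need an explicit anyOf() over the two orderings.

/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.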
/// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches TagDecl object that are spelled with "struct." /// /// Example matches S, but not C, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); } /// Matches TagDecl object that are spelled with "union." /// /// Example matches U, but not C, S or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); } /// Matches TagDecl object that are spelled with "class." /// /// Example matches C, but not S, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isClass) { return Node.isClass(); } /// Matches TagDecl object that are spelled with "enum." /// /// Example matches E, but not C, S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? 
a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { ASTChildrenNotSpelledInSourceScope RAII(Finder, false); const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". 
AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches declarations of virtual methods and C++ base specifers that specify /// virtual inheritance. /// /// Example: /// \code /// class A { /// public: /// virtual void x(); // matches x /// }; /// \endcode /// /// Example: /// \code /// class Base {}; /// class DirectlyDerived : virtual Base {}; // matches Base /// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base /// \endcode /// /// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER(isVirtual, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl, CXXBaseSpecifier)) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. 
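// Editor's note: a minimal sketch of consuming forEachOverridden, reusing the
// A/B/C hierarchy documented above (binding names are arbitrary):
//
//   cxxMethodDecl(ofClass(hasName("C")),
//                 forEachOverridden(cxxMethodDecl().bind("base")))
//       .bind("derived")
//
// Each match pairs one overriding method with one directly overridden method;
// under multiple inheritance the same "derived" node can appear in several
// matches, once per base-class method it overrides.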
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. /// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". 
AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). AST_MATCHER_P(MemberExpr, member, internal::Matcher<ValueDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder); } /// Matches a member expression where the object expression is matched by a /// given matcher. Implicit object expressions are included; that is, it matches /// use of implicit `this`. /// /// Given /// \code /// struct X { /// int m; /// int f(X x) { x.m; return m; } /// }; /// \endcode /// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m`, but not `m`; however, /// memberExpr(hasObjectExpression(hasType(pointsTo( // cxxRecordDecl(hasName("X")))))) /// matches `m` (aka. `this->m`), but not `x.m`. AST_POLYMORPHIC_MATCHER_P( hasObjectExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr), internal::Matcher<Expr>, InnerMatcher) { if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; return InnerMatcher.matches(*Node.getBase(), Finder, Builder); } /// Matches any using shadow declaration. /// /// Given /// \code /// namespace X { void b(); } /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasName("b")))) /// matches \code using X::b \endcode AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(), Node.shadow_end(), Finder, Builder) != Node.shadow_end(); } /// Matches a using shadow declaration where the target declaration is /// matched by the given matcher. 
/// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. 
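// Editor's note: the template narrowers above split specializations cleanly.
// A hedged sketch:
//
//   // Implicit and explicit instantiations, e.g. X<A> x;
//   cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
//
//   // User-written specializations, e.g. template <> class X<A> {};
//   cxxRecordDecl(hasName("::X"), isExplicitTemplateSpecialization())
//
// A given specialization satisfies at most one of the two.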
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. 
/// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. 
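// Editor's note: a short sketch combining the array-type matchers above; the
// variable names come from the documentation examples:
//
//   // int a[42];
//   varDecl(hasType(constantArrayType(hasSize(42))))
//
//   // int a[b];
//   varDecl(hasType(variableArrayType(hasSizeExpr(
//       ignoringImpCasts(declRefExpr(to(varDecl(hasName("b")))))))))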
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;

/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));

/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;

/// Matches member pointer types.
/// Given
/// \code
///   struct A { int i; };
///   int A::* ptr = &A::i;
/// \endcode
/// memberPointerType()
///   matches "int A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;

/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int c = 5;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// pointerType()
///   matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;

/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
///   int *a;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// objcObjectPointerType()
///   matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;

/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;

/// Matches lvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;

/// Matches rvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;

/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
/// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches types nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. 
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
    return InnerMatcher.matches(*Qualifier, Finder, Builder);

  return false;
}

/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}

/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
///   template <typename T>
///   void F(T t) {
///     int i = 1 + t;
///   }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
    substTemplateTypeParmType;

/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
///   template <typename T>
///   double F(T t);
///   int i;
///   double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
    hasReplacementType, getReplacementType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));

/// Matches template type parameter types.
///
/// Example matches T, but not int.
///     (matcher = templateTypeParmType())
/// \code
///   template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;

/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
///     (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
///   template <typename T> struct S {
///     void f(S s);
///     void g(S<T> s);
///   };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;

/// Matches decayed type.
///
/// Example matches i[] in declaration of f.
///     (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
///     (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
///   void f(int i[]) {
///     i[1] = 0;
///   }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;

/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}

/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC),
                              Finder, Builder);
}

/// Matches nested name specifiers.
/// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. 
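// Editor's note: a hedged sketch tying the nested-name-specifier matchers
// together, reusing the A::B::C example from the documentation above:
//
//   // Match the "B::" specifier whose prefix is "A::":
//   nestedNameSpecifier(
//       specifiesType(hasDeclaration(cxxRecordDecl(hasName("B")))),
//       hasPrefix(specifiesType(asString("struct A"))))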
/// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. /// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { if (Finder->isTraversalIgnoringImplicitNodes() && !I->isWritten()) continue; BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; ASTChildrenNotSpelledInSourceScope RAII(Finder, false); return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. 
/// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. AST_MATCHER(Expr, nullPointerConstant) { return Node.isNullPointerConstant(Finder->getASTContext(), Expr::NPC_ValueDependentIsNull); } /// Matches the DecompositionDecl the binding belongs to. /// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// bindingDecl(hasName("f"), /// forDecomposition(decompositionDecl()) /// \endcode /// matches 'f' in 'auto &[f, s, t]'. AST_MATCHER_P(BindingDecl, forDecomposition, internal::Matcher<ValueDecl>, InnerMatcher) { if (const ValueDecl *VD = Node.getDecomposedDecl()) return InnerMatcher.matches(*VD, Finder, Builder); return false; } /// Matches the Nth binding of a DecompositionDecl. 
/// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// decompositionDecl(hasBinding(0, /// bindingDecl(hasName("f").bind("fBinding")))) /// \endcode /// matches the decomposition decl with 'f' bound to "fBinding". AST_MATCHER_P2(DecompositionDecl, hasBinding, unsigned, N, internal::Matcher<BindingDecl>, InnerMatcher) { if (Node.bindings().size() <= N) return false; return InnerMatcher.matches(*Node.bindings()[N], Finder, Builder); } /// Matches any binding of a DecompositionDecl. /// /// For example, in: /// \code /// void foo() /// { /// int arr[3]; /// auto &[f, s, t] = arr; /// /// f = 42; /// } /// \endcode /// The matcher: /// \code /// decompositionDecl(hasAnyBinding(bindingDecl(hasName("f").bind("fBinding")))) /// \endcode /// matches the decomposition decl with 'f' bound to "fBinding". AST_MATCHER_P(DecompositionDecl, hasAnyBinding, internal::Matcher<BindingDecl>, InnerMatcher) { return llvm::any_of(Node.bindings(), [&](const auto *Binding) { return InnerMatcher.matches(*Binding, Finder, Builder); }); } /// Matches the declaration of the function the statement belongs to. /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does not match 'return v > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while(!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for(const auto &Parent: Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches a declaration that has external formal linkage. /// /// Example matches only z (matcher = varDecl(hasExternalFormalLinkage())) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode /// /// Example matches f() because it has external formal linkage despite being /// unique to the translation unit as though it has internal linkage /// (matcher = functionDecl(hasExternalFormalLinkage())) /// /// \code /// namespace { /// void f() {} /// } /// \endcode AST_MATCHER(NamedDecl, hasExternalFormalLinkage) { return Node.hasExternalFormalLinkage(); } /// Matches a declaration that has default arguments. /// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode /// /// Deprecated. Use hasInitializer() instead to be able to /// match on the contents of the default argument. For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions.
/// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. /// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining.
const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective())`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be a standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder) != Clauses.end(); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and /// ``default(firstprivate)`` extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind /// specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isFirstPrivateKind())`` matches only /// ``default(firstprivate)``. AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is used from clang-query, the ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
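// --- Usage sketch (illustrative addition, not part of ASTMatchers.h) ---
// The matchers declared above are normally driven through a MatchFinder.
// This is a minimal sketch, assuming the clang Tooling libraries are linked;
// MatchFinder, buildASTFromCode, and the bound-node API are real clang
// interfaces, while CaseReporter, runSketch, and the test code string are
// hypothetical names invented for the example.
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/raw_ostream.h"

namespace {
using namespace clang;
using namespace clang::ast_matchers;

class CaseReporter : public MatchFinder::MatchCallback {
public:
  void run(const MatchFinder::MatchResult &Result) override {
    // "c" is the id bound by caseStmt().bind("c") below.
    if (const auto *CS = Result.Nodes.getNodeAs<CaseStmt>("c"))
      CS->getBeginLoc().print(llvm::outs(), *Result.SourceManager);
  }
};

void runSketch() {
  CaseReporter Reporter;
  MatchFinder Finder;
  // One match per case label, as documented for forEachSwitchCase above.
  Finder.addMatcher(switchStmt(forEachSwitchCase(caseStmt().bind("c"))),
                    &Reporter);
  auto AST = tooling::buildASTFromCode(
      "void f(int i) { switch (i) { case 1: case 2: break; } }");
  Finder.matchAST(AST->getASTContext());
}
} // namespace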
ipagerank.c
//------------------------------------------------------------------------------ // SuiteSparse/GraphBLAS/Demo/Source/ipagerank: pagerank using uint64 semiring //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. // A is a square unsymmetric binary matrix of size n-by-n, where A(i,j) is the // edge (i,j). Self-edges are OK. A can be of any built-in type. // On output, P is a pointer to an array of iPageRank structs. P[0] is the // highest ranked page, with pagerank P[0].pagerank and the page corresponds to // node number P[0].page in the graph. P[1] is the next page, and so on, to // the lowest-ranked page P[n-1].page with rank P[n-1].pagerank. // See ipagerank.m for the equivalent computation in MATLAB (except the random // number generator differs). // This method uses no floating-point arithmetic at all. //------------------------------------------------------------------------------ // helper macros //------------------------------------------------------------------------------ // free all workspace #define FREEWORK \ { \ GrB_free (&C) ; \ GrB_free (&r) ; \ if (I != NULL) free (I) ; \ if (X != NULL) free (X) ; \ GrB_free (&op_scale) ; \ GrB_free (&op_div) ; \ } // error handler: free output P and all workspace (used by CHECK and OK macros) #define FREE_ALL \ { \ if (P != NULL) free (P) ; \ FREEWORK ; \ } #include "demos.h" //------------------------------------------------------------------------------ // scalar operators //------------------------------------------------------------------------------ uint64_t ic ; #pragma omp threadprivate(ic) // scale by the integer ic void iscale (uint64_t *z, const uint64_t *x) { (*z) = ic * (*x) ; } // divide an integer by ZSCALE = 2^30, guard against integer underflow void idiv (uint64_t *z, const uint64_t *x) { (*z) = (*x) / ZSCALE ; if ((*z) == 0) (*z) = 1 ; } //------------------------------------------------------------------------------ // comparison function for qsort //------------------------------------------------------------------------------ int icompar (const void *x, const void *y) { iPageRank *a = (iPageRank *) x ; iPageRank *b = (iPageRank *) y ; // sort by pagerank in descending order if (a->pagerank > b->pagerank) { return (-1) ; } else if (a->pagerank == b->pagerank) { return (0) ; } else { return (1) ; } } //------------------------------------------------------------------------------ // ipagerank: compute the iPageRank of all nodes in a graph //------------------------------------------------------------------------------ GrB_Info ipagerank // GrB_SUCCESS or error condition ( iPageRank **Phandle, // output: pointer to array of iPageRank structs GrB_Matrix A // input graph, not modified ) { //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- GrB_Info info ; uint64_t *X = NULL ; GrB_Index n, *I = NULL ; iPageRank *P = NULL ; GrB_Vector r = NULL ; GrB_UnaryOp op_scale = NULL, op_div = NULL ; GrB_Matrix C = NULL ; (*Phandle) = NULL ; // n = size (A,1) ; // number of nodes OK (GrB_Matrix_nrows (&n, A)) ; // ic = 912680550 if ZSCALE = 2^30 // double c = 0.85 ; // probability of walking to random neighbor // ic = 0.85 * ZSCALE ; // scaled integer version of c ic = 912680550 ; // Note the random number generator used here differs from MATLAB,
so this // function will not compute exactly the same thing as ipagerank.m. // since rand() is in the range 0 to RAND_MAX, the sum of all unscaled // rand() values will be about n*RMAX/2. The desired sum(r) = ZSCALE, // so scale each value from rand() by 2*ZSCALE / (RMAX*n) #define RMAX (((uint64_t) RAND_MAX) + 1) // r = rand (1,n) ; // random initial pageranks srand ((int) n) ; OK (GrB_Vector_new (&r, GrB_UINT64, n)) ; for (int64_t i = 0 ; i < n ; i++) { // get a random integer in the range 0 to RMAX-1. uint64_t x = (uint64_t) rand ( ) ; // ZSCALE = 2^30 so 2*ZSCALE = 2^31. RMAX is typically 2^31. So the // ratio 2*ZSCALE / RMAX is typically equal to 1. In that case, // neither of the two if-cases needs to be used. The test is a // compile-time constant so the compiler should be able to remove all // of the if-test code below. But do this anyway in case RMAX is not 2^31. if (2*ZSCALE > RMAX) { x = x * (2*ZSCALE / RMAX) ; } else if (2*ZSCALE < RMAX) { // RMAX is larger than 2*ZSCALE, so instead of multiplying // by (2*ZSCALE / RMAX), divide by the reciprocal. x = x / (RMAX / (2*ZSCALE)) ; } // finish the scaling by dividing by n x = x / n ; // ensure x > 0 if (x == 0) x = 1 ; // now each r(i) is in the range 1 to 2*ZSCALE/n, and the expected value // of sum (r) will be ZSCALE. OK (GrB_Vector_setElement (r, x, i)) ; } // double a = (1-c) / n ; // to jump to any random node in entire graph // ZSCALE - ic = 161061274 if ZSCALE = 2^30 uint64_t ia = ZSCALE - ic ; // scaled integer version of (1-c) ia = ia / n ; if (ia == 0) ia = 1 ; // ensure ia > 0 OK (irowscale (&C, A)) ; // C = scale A by out-degree // create unary operators OK (GrB_UnaryOp_new (&op_scale, iscale, GrB_UINT64, GrB_UINT64)) ; OK (GrB_UnaryOp_new (&op_div, idiv, GrB_UINT64, GrB_UINT64)) ; //-------------------------------------------------------------------------- // iterate to compute the pagerank of each node //-------------------------------------------------------------------------- for (int i = 0 ; i < 20 ; i++) { // r = floor ((floor (floor ((c*r)/z) * C) + floor (a * sum (r))) / z) ; // with implicit floor: // r = ((((c*r) / z) * C) + (a * sum (r))) / z ; // s = ia * sum (r) ; uint64_t s ; OK (GrB_reduce (&s, NULL, GxB_PLUS_UINT64_MONOID, r, NULL)) ; s = s * ia ; // r = ic * r OK (GrB_apply (r, NULL, NULL, op_scale, r, NULL)) ; // r = r / ZSCALE OK (GrB_apply (r, NULL, NULL, op_div, r, NULL)) ; // r = r * C OK (GrB_vxm (r, NULL, NULL, GxB_PLUS_TIMES_UINT64, r, C, NULL)) ; // r = r + s OK (GrB_assign (r, NULL, GrB_PLUS_UINT64, s, GrB_ALL, n, NULL)) ; // r = r / ZSCALE OK (GrB_apply (r, NULL, NULL, op_div, r, NULL)) ; } //-------------------------------------------------------------------------- // sort the nodes by pagerank //-------------------------------------------------------------------------- // [r,irank] = sort (r, 'descend') ; // [I,X] = find (r) ; X = malloc (n * sizeof (uint64_t)) ; I = malloc (n * sizeof (GrB_Index)) ; CHECK (I != NULL && X != NULL, GrB_OUT_OF_MEMORY) ; GrB_Index nvals = n ; OK (GrB_Vector_extractTuples (I, X, &nvals, r)) ; // this will always be true since r is dense, but double-check anyway: CHECK (nvals == n, GrB_PANIC) ; // r no longer needed GrB_free (&r) ; // P = struct (X,I) P = malloc (n * sizeof (iPageRank)) ; CHECK (P != NULL, GrB_OUT_OF_MEMORY) ; for (int64_t k = 0 ; k < nvals ; k++) { // The kth ranked page is P[k].page (with k=0 being the highest rank), // and its pagerank is P[k].pagerank.
P [k].pagerank = X [k] ; // I [k] == k will be true for SuiteSparse:GraphBLAS but in general I // can be returned in any order, so use I [k] instead of k, for other // GraphBLAS implementations. P [k].page = I [k] ; } // free workspace FREEWORK ; // qsort (P) in descending order qsort (P, n, sizeof (iPageRank), icompar) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- (*Phandle) = P ; return (GrB_SUCCESS) ; }
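// --- Worked example (illustrative addition, not part of ipagerank.c) ---
// A standalone check of the fixed-point constants used above, assuming
// ZSCALE = 2^30 as stated in the comments; ZSCALE_LOCAL is a stand-in for
// the ZSCALE macro from demos.h. ic = floor(0.85 * 2^30) = 912680550 is the
// scaled damping factor c, and ZSCALE - ic = 161061274 is the scaled (1-c).
#include <cstdint>
#include <cstdio>

int main(void) {
  const uint64_t ZSCALE_LOCAL = UINT64_C(1) << 30;            // 2^30
  const uint64_t ic = (uint64_t)(0.85 * (double)ZSCALE_LOCAL);
  std::printf("ic          = %llu (expect 912680550)\n",
              (unsigned long long)ic);
  std::printf("ZSCALE - ic = %llu (expect 161061274)\n",
              (unsigned long long)(ZSCALE_LOCAL - ic));
  // One iteration step in this arithmetic, for a single entry r:
  //   r <- ( ((ic*r)/ZSCALE) dotted with a column of C + ia*sum(r) ) / ZSCALE
  // all in uint64, matching the GrB_apply/GrB_vxm/GrB_assign sequence above.
  return 0;
}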
matrix.h
/** * @file matrix.h This code provides a templated matrix implementation * @author TPOC: contact@palisade-crypto.org * * @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #ifndef LBCRYPTO_MATH_MATRIX_H #define LBCRYPTO_MATH_MATRIX_H #include <iostream> #include <functional> #include <cmath> #include "../math/backend.h" #include "../lattice/backend.h" #include "../math/nbtheory.h" #include "../math/distrgen.h" #include "../encoding/encodings.h" #include "../utils/inttypes.h" #include "../utils/utilities.h" #include "../utils/memory.h" using std::invalid_argument; namespace lbcrypto { template<class Element> class Matrix : public Serializable { public: typedef vector<vector<Element>> data_t; typedef vector<Element> data_row_t; typedef std::function<Element(void)> alloc_func; /** * Constructor that initializes matrix values using a zero allocator * * @param &allocZero lambda function for zero initialization. * @param &rows number of rows. * @param &cols number of columns. */ Matrix(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) { data.resize(rows); for (auto row = data.begin(); row != data.end(); ++row) { for (size_t col = 0; col < cols; ++col) { row->push_back(allocZero()); } } } //TODO: add Clear(); /** * Constructor that initializes matrix values using a distribution generation allocator * * @param &allocZero lambda function for zero initialization (used for initializing derived matrix objects) * @param &rows number of rows. * @param &cols number of columns. * @param &allocGen lambda function for initialization using a distribution generator. */ Matrix(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen); /** * Constructor of an empty matrix. * SetSize must be called on this matrix to use it * SetAlloc needs to be called if 0 passed to constructor * This mostly exists to support deserializing * * @param &allocZero lambda function for zero initialization.
*/ Matrix(alloc_func allocZero = 0) : data(), rows(0), cols(0), allocZero(allocZero) {} /** * Set the size of a matrix, elements are zeroed out * * @param rows number of rows * @param cols number of columns */ void SetSize(size_t rows, size_t cols) { if( this->rows != 0 || this->cols != 0 ) { PALISADE_THROW(not_available_error, "You cannot SetSize on a non-empty matrix"); } this->rows = rows; this->cols = cols; data.resize(rows); for (auto row = data.begin(); row != data.end(); ++row) { for (size_t col = 0; col < cols; ++col) { row->push_back(allocZero()); } } } /** * SetAllocator - set the function to allocate a zero; * basically only required for deserializer * * @param allocZero */ void SetAllocator(alloc_func allocZero) { this->allocZero = allocZero; } /** * Copy constructor * * @param &other the matrix object to be copied */ Matrix(const Matrix<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) { deepCopyData(other.data); } /** * Assignment operator * * @param &other the matrix object whose values are to be copied * @return the resulting matrix */ Matrix<Element>& operator=(const Matrix<Element>& other); /** * In-place change of the current matrix to a matrix of all ones * * @return the resulting matrix */ Matrix<Element>& Ones(); // Macro for convenient definitions of class implementations of special functions #define ONES_FOR_TYPE(T) \ template<> \ Matrix<T>& Matrix<T>::Ones() { \ for (size_t row = 0; row < rows; ++row) { \ for (size_t col = 0; col < cols; ++col) { \ data[row][col] = 1; \ } \ } \ return *this; \ } /** * In-place modulo reduction * * @return the resulting matrix */ Matrix<Element>& ModEq(const Element &modulus); /** * modular subtraction * * @return the resulting matrix */ Matrix<Element>& ModSubEq(Matrix<Element> const& b, const Element &modulus); /** * Fill matrix using the same element * * @param &val the element the matrix is filled by * * @return the resulting matrix */ Matrix<Element>& Fill(const Element &val); /** * In-place change of the current matrix to Identity matrix * * @return the resulting matrix */ Matrix<Element>& Identity(); #define IDENTITY_FOR_TYPE(T) \ template<> \ Matrix<T>& Matrix<T>::Identity() { \ for (size_t row = 0; row < rows; ++row) { \ for (size_t col = 0; col < cols; ++col) { \ if (row == col) { \ data[row][col] = 1; \ } else { \ data[row][col] = 0; \ } \ } \ } \ return *this; \ } /** * Sets the first row to be powers of two for when the base is two * * @param base is the base the digits of the matrix are represented in * @return the resulting matrix */ Matrix<Element> GadgetVector(int64_t base = 2) const; #define GADGET_FOR_TYPE(T) \ template<> \ Matrix<T> Matrix<T>::GadgetVector(int64_t base) const { \ Matrix<T> g(allocZero, rows, cols); \ auto base_matrix = allocZero(); \ size_t k = cols/rows; \ base_matrix = base; \ g(0, 0) = 1; \ for (size_t i = 1; i < k; i++) { \ g(0, i) = g(0, i-1) * base_matrix; \ } \ for (size_t row = 1; row < rows; row++) { \ for (size_t i = 0; i < k; i++) { \ g(row, i + row*k) = g(0, i); \ } \ } \ return g; \ } #define GADGET_FOR_TYPE_DCRT(T) \ template<> \ Matrix<T> Matrix<T>::GadgetVector(int64_t base) const \ { \ Matrix<T> g(allocZero, rows, cols); \ auto base_matrix = allocZero(); \ base_matrix = base; \ size_t bk = 1; \ \ auto params = g(0,0).GetParams()->GetParams(); \ \ uint64_t digitCount = (long)ceil(log2(params[0]->GetModulus().ConvertToDouble())/log2(base)); \ \ for (size_t k = 0; k < digitCount; k++) { \ for (size_t i = 0; i < params.size(); i++) { \
NativePoly temp(params[i]); \ temp = bk; \ g(0,k+i*digitCount).SetElementAtIndex(i,temp); \ } \ bk *= base; \ } \ \ size_t kCols = cols/rows; \ for (size_t row = 1; row < rows; row++) { \ for (size_t i = 0; i < kCols; i++) { \ g(row, i + row*kCols) = g(0, i); \ } \ } \ return g; \ } /** * Computes the infinity norm * * @return the norm in double format */ double Norm() const; #define NORM_FOR_TYPE(T) \ template<> \ double Matrix<T>::Norm() const { \ double retVal = 0.0; \ double locVal = 0.0; \ for (size_t row = 0; row < rows; ++row) { \ for (size_t col = 0; col < cols; ++col) { \ locVal = data[row][col].Norm(); \ if (locVal > retVal) { \ retVal = locVal; \ } \ } \ } \ return retVal; \ } /** * Matrix multiplication * * @param &other the multiplier matrix * @return the result of multiplication */ Matrix<Element> Mult(Matrix<Element> const& other) const; /** * Operator for matrix multiplication * * @param &other the multiplier matrix * @return the result of multiplication */ Matrix<Element> operator*(Matrix<Element> const& other) const { return Mult(other); } /** * Multiplication of matrix by a scalar * * @param &other the multiplier element * @return the result of multiplication */ Matrix<Element> ScalarMult(Element const& other) const { Matrix<Element> result(*this); #pragma omp parallel for for (size_t col = 0; col < result.cols; ++col) { for (size_t row = 0; row < result.rows; ++row) { result.data[row][col] = result.data[row][col] * other; } } return result; } /** * Operator for scalar multiplication * * @param &other the multiplier element * @return the result of multiplication */ Matrix<Element> operator*(Element const& other) const { return ScalarMult(other); } /** * Equality check * * @param &other the matrix object to compare to * @return the boolean result */ bool Equal(Matrix<Element> const& other) const { if (rows != other.rows || cols != other.cols) { return false; } for (size_t i = 0; i < rows; ++i) { for (size_t j = 0; j < cols; ++j) { if (data[i][j] != other.data[i][j]) { return false; } } } return true; } /** * Operator for equality check * * @param &other the matrix object to compare to * @return the boolean result */ bool operator==(Matrix<Element> const& other) const { return Equal(other); } /** * Operator for non-equality check * * @param &other the matrix object to compare to * @return the boolean result */ bool operator!=(Matrix<Element> const& other) const { return !Equal(other); } /** * Get property to access the data as a vector of vectors * * @return the data as vector of vectors */ const data_t& GetData() const { return data; } /** * Get property to access the number of rows in the matrix * * @return the number of rows */ size_t GetRows() const { return rows; } /** * Get property to access the number of columns in the matrix * * @return the number of columns */ size_t GetCols() const { return cols; } /** * Get property to access the zero allocator for the matrix * * @return the lambda function corresponding to the element zero allocator */ alloc_func GetAllocator() const { return allocZero; } /** * Sets the evaluation or coefficient representation for all ring elements that support the SetFormat method * * @param &format the enum value corresponding to coefficient or evaluation representation */ void SetFormat(Format format); /** * Matrix addition * * @param &other the matrix to be added * @return the resulting matrix */ Matrix<Element> Add(Matrix<Element> const& other) const { if (rows != other.rows || cols != other.cols) { PALISADE_THROW(math_error, "Addition 
operands have incompatible dimensions"); } Matrix<Element> result(*this); #pragma omp parallel for for (size_t j = 0; j < cols; ++j) { for (size_t i = 0; i < rows; ++i) { result.data[i][j] += other.data[i][j]; } } return result; } /** * Operator for matrix addition * * @param &other the matrix to be added * @return the resulting matrix */ Matrix<Element> operator+(Matrix<Element> const& other) const { return this->Add(other); } /** * Operator for in-place addition * * @param &other the matrix to be added * @return the resulting matrix (same object) */ Matrix<Element>& operator+=(Matrix<Element> const& other); /** * Matrix subtraction * * @param &other the matrix to be subtracted * @return the resulting matrix */ Matrix<Element> Sub(Matrix<Element> const& other) const { if (rows != other.rows || cols != other.cols) { PALISADE_THROW(math_error, "Subtraction operands have incompatible dimensions"); } Matrix<Element> result(allocZero, rows, other.cols); #pragma omp parallel for for (size_t j = 0; j < cols; ++j) { for (size_t i = 0; i < rows; ++i) { result.data[i][j] = data[i][j] - other.data[i][j]; } } return result; } /** * Operator for matrix subtraction * * @param &other the matrix to be subtracted * @return the resulting matrix */ Matrix<Element> operator-(Matrix<Element> const& other) const { return this->Sub(other); } /** * Operator for in-place matrix subtraction * * @param &other the matrix to be subtracted * @return the resulting matrix (same object) */ Matrix<Element>& operator-=(Matrix<Element> const& other); /** * Matrix transposition * * @return the resulting matrix */ Matrix<Element> Transpose() const; // YSP The signature of this method needs to be changed in the future /** * Matrix determinant - found using Laplace formula with complexity O(d!), where d is the dimension * * @param *result where the result is stored */ void Determinant(Element *result) const; //Element Determinant() const; /** * Cofactor matrix - the matrix of determinants of the minors A_{ij} multiplied by (-1)^{i+j} * * @return the cofactor matrix for the given matrix */ Matrix<Element> CofactorMatrix() const; /** * Add rows to the bottom of the matrix * * @param &other the matrix to be added to the bottom of current matrix * @return the resulting matrix */ Matrix<Element>& VStack(Matrix<Element> const& other); /** * Add columns to the right of the matrix * * @param &other the matrix to be added to the right of current matrix * @return the resulting matrix */ Matrix<Element>& HStack(Matrix<Element> const& other); /** * Matrix indexing operator - writeable instance of the element * * @param &row row index * @param &col column index * @return the element at the index */ Element& operator()(size_t row, size_t col) { return data[row][col]; } /** * Matrix indexing operator - read-only instance of the element * * @param &row row index * @param &col column index * @return the element at the index */ Element const& operator()(size_t row, size_t col) const { return data[row][col]; } /** * Matrix row extractor * * @param &row row index * @return the row at the index */ Matrix<Element> ExtractRow(size_t row) const { Matrix<Element> result(this->allocZero,1,this->cols); int i = 0; for (auto elem = this->GetData()[row].begin(); elem != this->GetData()[row].end(); ++elem) { result(0,i) = *elem; i++; } return result; //return *this; } /** * Matrix column extractor * * @param &col col index * @return the col at the index */ Matrix<Element> ExtractCol(size_t col) const { Matrix<Element> result(this->allocZero,this->rows,1); for
(size_t i = 0; i < this->rows; i++) { result(i,0) = data[i][col]; } return result; //return *this; } /** * Matrix rows extractor in a range from row_start to row_end, inclusive * * @param &row_start &row_end row indices * @return the rows in the range delimited by indices inclusive */ inline Matrix<Element> ExtractRows(size_t row_start, size_t row_end) const { Matrix<Element> result(this->allocZero,row_end-row_start+1,this->cols); for(usint row=row_start; row<row_end+1; row++) { int i = 0; for (auto elem = this->GetData()[row].begin(); elem != this->GetData()[row].end(); ++elem) { result(row-row_start,i) = *elem; i++; } } return result; } friend std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m) { os << "[ "; for (size_t row = 0; row < m.GetRows(); ++row) { os << "[ "; for (size_t col = 0; col < m.GetCols(); ++col) { os << m(row,col) << " "; } os << "]\n"; } os << " ]\n"; return os; } /** * Call switch format for each (ring) element * */ void SwitchFormat(); #define NOT_AN_ELEMENT_MATRIX(T) \ template<> \ void Matrix<T>::SwitchFormat() { \ PALISADE_THROW(not_available_error, "Not a matrix of Elements"); \ } /* * Multiply the matrix by a vector whose elements are all 1's. This causes the elements of each * row of the matrix to be added and placed into the corresponding position in the output vector. */ Matrix<Element> MultByUnityVector() const; /* * Multiply the matrix by a vector of random 1's and 0's, which is the same as adding select * elements in each row together. * Return a vector that is a rows x 1 matrix. */ Matrix<Element> MultByRandomVector(std::vector<int> ranvec) const; template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::make_nvp("d", data) ); ar( ::cereal::make_nvp("r", rows) ); ar( ::cereal::make_nvp("c", cols) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar( ::cereal::make_nvp("d", data) ); ar( ::cereal::make_nvp("r", rows) ); ar( ::cereal::make_nvp("c", cols) ); // users will need to SetAllocator for any newly deserialized matrix } std::string SerializedObjectName() const { return "Matrix"; } static uint32_t SerializedVersion() { return 1; } private: data_t data; size_t rows; size_t cols; alloc_func allocZero; //mutable int NUM_THREADS = 1; //deep copy of data - used for copy constructor void deepCopyData(data_t const& src) { data.clear(); data.resize(src.size()); for (size_t row = 0; row < src.size(); ++row) { for (auto elem = src[row].begin(); elem != src[row].end(); ++elem) { data[row].push_back(*elem); } } } }; /** * Operator for scalar multiplication of matrix * * @param &e element * @param &M matrix * @return the resulting matrix */ template<class Element> Matrix<Element> operator*(Element const& e, Matrix<Element> const& M) { return M.ScalarMult(e); } /** * Generates a matrix of rotations. See pages 7-8 of https://eprint.iacr.org/2013/297 * * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated * @return the resulting matrix of big binary integers */ template<typename Element> Matrix<typename Element::Integer> Rotate(Matrix<Element> const& inMat); /** * Each element becomes a square matrix with columns of that element's * rotations in coefficient form.
See pages 7-8 of https://eprint.iacr.org/2013/297 * * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated * @return the resulting matrix of big binary integers */ template<typename Element> Matrix<typename Element::Vector> RotateVecResult(Matrix<Element> const& inMat); /** * Stream output operator * * @param &os stream * @param &m matrix to be outputted * @return the chained stream */ template<class Element> std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m); /** * Gives the Cholesky decomposition of the input matrix. * The assumption is that the covariance matrix does not have large coefficients because it is formed by * discrete Gaussians e and s; this implies int32_t can be used * This algorithm can be further improved - see the Darmstadt paper section 4.4 * http://eprint.iacr.org/2013/297.pdf * * @param &input the matrix for which the Cholesky decomposition is to be computed * @return the resulting matrix of floating-point numbers */ Matrix<double> Cholesky(const Matrix<int32_t> &input); void Cholesky(const Matrix<int32_t> &input, Matrix<double> &result); /** * Convert a matrix of integers from BigInteger to int32_t * Convert from Z_q to [-q/2, q/2] * * @param &input the input matrix * @param &modulus the ring modulus * @return the resulting matrix of int32_t */ Matrix<int32_t> ConvertToInt32(const Matrix<BigInteger> &input, const BigInteger& modulus); /** * Convert a matrix of BigVector to int32_t * Convert from Z_q to [-q/2, q/2] * * @param &input the input matrix * @param &modulus the ring modulus * @return the resulting matrix of int32_t */ Matrix<int32_t> ConvertToInt32(const Matrix<BigVector> &input, const BigInteger& modulus); /** * Split a vector of int64_t into a vector of ring elements with ring dimension n * * @param &other the input matrix * @param &n the ring dimension * @param &params Poly element params * @return the resulting matrix of Poly */ template<typename Element> Matrix<Element> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename Element::Params> params); #define SPLIT64_FOR_TYPE(T) \ template<> \ Matrix<T> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \ auto zero_alloc = T::Allocator(params, COEFFICIENT); \ size_t rows = other.GetRows() / n; \ Matrix<T> result(zero_alloc, rows, 1); \ for (size_t row = 0; row < rows; ++row) { \ std::vector<int64_t> values(n); \ for (size_t i = 0; i < n; ++i) \ values[i] = other(row*n + i, 0); \ result(row, 0) = values; \ } \ return result; \ } /** * Another method for splitting a vector of int32_t into a vector of ring elements with ring dimension n * * @param &other the input matrix * @param &n the ring dimension * @param &params Poly element params * @return the resulting matrix of Poly */ template<typename Element> Matrix<Element> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n, const shared_ptr<typename Element::Params> params); #define SPLIT32ALT_FOR_TYPE(T) \ template<> \ Matrix<T> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \ auto zero_alloc = T::Allocator(params, COEFFICIENT); \ size_t rows = other.GetRows(); \ Matrix<T> result(zero_alloc, rows, 1); \ for (size_t row = 0; row < rows; ++row) { \ std::vector<int32_t> values(n); \ for (size_t i = 0; i < n; ++i) \ values[i] = other(row, i); \ result(row, 0) = values; \ } \ return result; \ } /** * Split a vector of int64_t into a vector of ring
elements with ring dimension n * * @param &other the input matrix * @param &n the ring dimension * @param &params Poly element params * @return the resulting matrix of Poly */ template<typename Element> Matrix<Element> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename Element::Params> params); #define SPLIT64ALT_FOR_TYPE(T) \ template<> \ Matrix<T> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n, const shared_ptr<typename T::Params> params) { \ auto zero_alloc = T::Allocator(params, COEFFICIENT); \ size_t rows = other.GetRows(); \ Matrix<T> result(zero_alloc, rows, 1); \ for (size_t row = 0; row < rows; ++row) { \ std::vector<int64_t> values(n); \ for (size_t i = 0; i < n; ++i) \ values[i] = other(row, i); \ result(row, 0) = values; \ } \ return result; \ } } #endif // LBCRYPTO_MATH_MATRIX_H
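// --- Usage sketch (illustrative addition, not part of matrix.h) ---
// A minimal sketch of the Matrix API defined above, instantiated with a plain
// int32_t element and a zero allocator, assuming matrix.h is on the include
// path (the path below depends on the build tree). Only members implemented
// inline in the header (construction, operator(), Add, ScalarMult, the
// stream operator) are exercised; in PALISADE proper, Element is usually a
// ring element such as Poly or NativePoly. matrixUsageSketch is a
// hypothetical name for the example.
#include <cstdint>
#include <iostream>
#include "math/matrix.h"

void matrixUsageSketch() {
  using lbcrypto::Matrix;
  auto zeroAlloc = []() { return int32_t(0); };  // alloc_func yielding zeroes
  Matrix<int32_t> A(zeroAlloc, 2, 2);            // 2x2 zero matrix
  A(0, 0) = 1;                                   // writeable operator()
  A(1, 1) = 1;
  Matrix<int32_t> B = A.Add(A);                  // element-wise addition
  Matrix<int32_t> C = B.ScalarMult(3);           // scale every entry by 3
  std::cout << C;                                // prints 6 on the diagonal
}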
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. 
IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++0x contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } unsigned getDepth() const { return Depth; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. 
No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might have been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. 
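/// Such tokens carry extra state (delimiter counts, annotation end
/// locations, string-literal concatenation), so code that does not know the
/// token kind should dispatch instead of calling ConsumeToken() directly;
/// ConsumeAnyToken() above implements that dispatch. A minimal sketch:
/// \code
///   if (isTokenSpecial())
///     ConsumeAnyToken();  // picks ConsumeParen(), ConsumeBrace(), etc.
///   else
///     ConsumeToken();
/// \endcode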
bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or a token that is a likely /// typo for '=' (such as '==' or a compound-assignment operator); for such /// typos, a fix-it to '=' is emitted. bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed); PP.Lex(Tok); PP.EnterToken(Next); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the location of the consumed string token. This method is /// specific to strings, as it handles string literal concatenation, as per /// C99 5.1.1.2, translation phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief When we are consuming a code-completion token without having /// matched a specific position in the grammar, provide code-completion /// results based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules.
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
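/// An illustrative round-trip with setTypeAnnotation() below (a sketch, not
/// taken from a particular caller; 'T' is an arbitrary ParsedType):
/// \code
///   setTypeAnnotation(Tok, T);                     // stash the type
///   ParsedType Recovered = getTypeAnnotation(Tok); // read it back
/// \endcode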
static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // TryAnnotateTypeOrScopeToken will try hard to find a type name, attempting // typo correction if necessary. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. 
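/// For example, the second ';' below is diagnosed as an InsideStruct extra
/// semicolon:
/// \code
///   struct S { int a;; };
/// \endcode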
enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. 
If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. void SkipMalformedDecl(); private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively. class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations.
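/// For example, the body of f() below names a member that is declared only
/// later, so its tokens are cached now and parsed once the class is
/// complete (C++ [class.mem]p1):
/// \code
///   struct S {
///     int f() { return x; } // lexed here, parsed after 'S' is complete
///     int x;
///   };
/// \endcode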
struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; /// Whether this member function had an associated template /// scope. When true, D is a template declaration; /// otherwise, it is a member function declaration. bool TemplateScope; explicit LexedMethod(Parser* P, Decl *MD) : Self(P), D(MD), TemplateScope(false) {} void ParseLexedMethodDefs() override; }; /// LateParsedDefaultArgument - Keeps track of a parameter that may /// have a default argument that cannot be parsed yet because it /// occurs within a member function declaration inside the class /// (C++ [class.mem]p2). struct LateParsedDefaultArgument { explicit LateParsedDefaultArgument(Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr) : Param(P), Toks(std::move(Toks)) { } /// Param - The parameter declaration for this parameter. Decl *Param; /// Toks - The sequence of tokens that comprises the default /// argument expression, not including the '=' or the terminating /// ')' or ','. This will be NULL for parameters that have no /// default argument. std::unique_ptr<CachedTokens> Toks; }; /// LateParsedMethodDeclaration - A method declaration inside a class that /// contains at least one entity whose parsing needs to be delayed /// until the class itself is completely defined, such as a default /// argument (C++ [class.mem]p2). struct LateParsedMethodDeclaration : public LateParsedDeclaration { explicit LateParsedMethodDeclaration(Parser *P, Decl *M) : Self(P), Method(M), TemplateScope(false), ExceptionSpecTokens(nullptr) {} void ParseLexedMethodDeclarations() override; Parser* Self; /// Method - The method declaration. Decl *Method; /// Whether this member function had an associated template /// scope. When true, Method is a template declaration; /// otherwise, it is a member function declaration. bool TemplateScope; /// DefaultArgs - Contains the parameters of the function and /// their default arguments. At least one of the parameters will /// have a default argument, but all of the parameters of the /// method will be stored so that they can be reintroduced into /// scope at the appropriate times. SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top-level /// (non-nested) C++ class, method declarations that contain parts which /// won't be parsed until after the definition is completed /// (C++ [class.mem]p2), together with any attached inline definitions, /// are stored here along with the tokens that will later be parsed to /// create those entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete.
struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), TemplateScope(false), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { } /// Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// Whether this class had an associated template /// scope. When true, TagOrTemplate is a template declaration; /// otherwise, it is a tag declaration. bool TemplateScope : 1; /// Whether this class is an __interface. bool IsInterface : 1; /// The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// The stack of classes that is currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// RAII object used to manage the parsing of a class definition. class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// Pop this class off the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// Contains any template-specific information that has been parsed /// prior to parsing declaration specifiers. struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { } ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// The kind of template we are parsing. enum { /// We are not parsing a template at all. NonTemplate = 0, /// We are parsing a template declaration. Template, /// We are parsing an explicit specialization. ExplicitSpecialization, /// We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation. SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty.
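/// For example, the (only) template parameter list of an explicit
/// specialization is empty:
/// \code
///   template<> struct X<int> { };
/// \endcode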
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
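// Illustrative examples of the GNU simple-asm construct this parses (asm
// labels attached to declarations):
//   int ErrNo asm("errno");
//   void FastCopy(void *, const void *) asm("fast_copy_impl");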
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
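/// For example (Objective-C):
/// \code
///   NSArray<NSString *> *names;   // type arguments on the base type
///   id<NSCopying, NSCoding> obj;  // protocol qualifiers
/// \endcode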
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for recognizing Objective-C context-sensitive keywords. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false); /// Returns true if the next token cannot start an expression.
bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList( SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> Completer = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false); //===--------------------------------------------------------------------===// // C++0x 5.1.2: Lambda expressions // [...] 
() -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro, bool *SkippedInits = nullptr); bool TryParseLambdaIntroducer(LambdaIntroducer &Intro); ExprResult ParseLambdaExpressionAfterIntroducer( LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. 
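// For example, ParseCXXCondition accepts both a plain condition and, in
// C++17 mode, an init-statement preceding it (illustrative):
//   if (Ptr) { ... }
//   if (auto It = M.find(K); It != M.end()) { ... }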
struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. 
typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt); StmtResult ParseStatementOrDeclaration( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(ParsedStmtContext StmtCtx); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs, ParsedStmtContext StmtCtx); StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx, bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); bool ConsumeNullStmt(StmtVector &Stmts); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// Parse the block; this code is always used. IEB_Parse, /// Skip the block entirely; this code is never used. IEB_Skip, /// Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// The location of the initial keyword. SourceLocation KeywordLoc; /// Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// Nested-name-specifier preceding the name. CXXScopeSpec SS; /// The name we're looking for. UnqualifiedId Name; /// The behavior this __if_exists or __if_not_exists block should have.
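/// For reference, the construct being modeled is the Microsoft extension
/// below ('MyClass::OptionalMember' is an illustrative name):
/// \code
///   __if_exists(MyClass::OptionalMember) {
///     // tokens here are parsed, skipped, or kept dependent per Behavior
///   }
/// \endcode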
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; struct ForRangeInfo : ForRangeInit { StmtResult LoopVar; }; DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr); bool MightBeDeclarator(DeclaratorContext Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// When in code-completion, skip parsing of the function/method body /// unless the body contains the code-completion point. /// /// \returns true if the function body was skipped. bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. 
Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// Return true if we know that we are definitely looking at a /// decl-specifier and that it isn't part of an expression such as a /// function-style cast. Return false if it's not a decl-specifier, or we're /// not sure. bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration and an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration and an /// expression in the context of the C 'clause-1' or the C++ /// 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// Checks whether the current tokens form a type-id or an expression. /// It is similar to isTypeIdInParens but does not assume that the type-id /// is in parentheses. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration and an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration and an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator and /// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Based only on the given token kind, determine whether we know that /// we're at the start of an expression or a type-specifier-seq (which may /// be an expression, in C++). /// /// This routine does not attempt to resolve any of the trick cases, e.g., /// those involving lookup of identifiers. /// /// \returns \c TPR_true if this token starts an expression, \c TPR_false if /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot /// tell. TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *HasMissingTypename = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. 
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? bool standardAttributesAllowed() const { const LangOptions &LO = getLangOpts(); return LO.DoubleSquareBracketAttributes; } // Check for the start of an attribute-specifier-seq in a context where an // attribute is not allowed. bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!standardAttributesAllowed()) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); // FixItLoc = possible correct location for the attributes void ProhibitAttributes(ParsedAttributesWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clear(); } void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clearListOnly(); } void DiagnoseProhibitedAttributes(const SourceRange &Range, SourceLocation FixItLoc); // Forbid C++11 and C2x attributes that appear in certain syntactic locations // which the standard permits but we don't support yet, for example, attributes // that appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// Skip C++11 and C2x attributes and return the end location of the /// last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// Diagnose and skip C++11 and C2x attributes that appear in syntactic /// locations where attributes are not allowed. void DiagnoseAndSkipCXX11Attributes(); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. 
AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl 
**OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. 
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool IsTemplateArgumentList(unsigned Skip = 0); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == 
tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Array and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
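The TPResult functions above all follow the same pattern: a TryParse* routine speculatively consumes tokens, reports True/False/Ambiguous/Error, and the caller must backtrack before acting on the verdict. Below is a minimal, self-contained toy of that save/backtrack discipline; the TokenStream type and tryDisambiguateParen function are hypothetical illustrations, not clang's API (clang's real mechanism is the Parser::TentativeParsingAction RAII class).

// Toy sketch of the tentative-parsing pattern: parse speculatively, decide,
// then rewind so the real parse can start from the original position.
#include <cassert>
#include <string>
#include <vector>

enum class TPResult { True, False, Ambiguous, Error };

struct TokenStream {
  std::vector<std::string> toks;
  std::size_t pos = 0;
  const std::string &peek() const { return toks[pos]; }
  void consume() { ++pos; }
  std::size_t save() const { return pos; }   // mark a backtrack point
  void backtrack(std::size_t p) { pos = p; } // rewind to the mark
};

// "T (x);" could declare x or cast x to T; peek past the parens to decide.
TPResult tryDisambiguateParen(TokenStream &ts) {
  std::size_t mark = ts.save();  // the tentative parse consumes tokens...
  ts.consume();                  // type-name
  ts.consume();                  // '('
  ts.consume();                  // identifier
  ts.consume();                  // ')'
  TPResult r = (ts.peek() == ";") ? TPResult::True   // declaration
                                  : TPResult::False; // expression
  ts.backtrack(mark);            // ...so we must restore the position
  return r;
}

int main() {
  TokenStream ts{{"T", "(", "x", ")", ";", "<eof>"}};
  assert(tryDisambiguateParen(ts) == TPResult::True);
  assert(ts.pos == 0);           // stream untouched after the tentative parse
  return 0;
}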
GB_unaryop__lnot_int8_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int8_uint16 // op(A') function: GB_tran__lnot_int8_uint16 // C type: int8_t // A type: uint16_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int8_uint16 ( int8_t *restrict Cx, const uint16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int8_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
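As a sanity check of the macro pipeline above, here is a stand-alone sketch (no GraphBLAS headers) of what GB_unop__lnot_int8_uint16 computes per entry. The point worth seeing is the order of operations: the uint16_t value is truncated to int8_t first (GB_CASTING), and only then logically negated (GB_OP), so an input of 256 maps to 1. Two's-complement truncation is assumed, as on all mainstream targets.

// Per-entry semantics of the generated kernel: Cx[p] = !((int8_t) Ax[p] != 0)
#include <cstdint>
#include <cstdio>

int main() {
  const uint16_t Ax[] = {0, 1, 255, 256}; // 256 truncates to 0, 255 to -1
  int8_t Cx[4];
  for (int p = 0; p < 4; p++) {
    int8_t x = (int8_t) Ax[p]; // GB_CASTING: int8_t x = (int8_t) aij
    Cx[p] = !(x != 0);         // GB_OP:      z = !(x != 0)
  }
  for (int p = 0; p < 4; p++) printf("%d ", (int) Cx[p]); // prints: 1 0 0 1
  printf("\n");
  return 0;
}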
evec_interface_impl.h
template<typename GridPolicies> void EvecInterface<GridPolicies>::CGNE_MdagM(Grid::SchurDiagMooeeOperator<GridDirac,GridFermionField> &linop, typename GridPolicies::GridFermionField &solution, const typename GridPolicies::GridFermionField &source, double resid, int max_iters){ Grid::ConjugateGradient<GridFermionField> CG(resid, max_iters); CG(linop, source, solution); } //BFM evecs #ifdef USE_BFM_LANCZOS template<typename GridPolicies> class EvecInterfaceBFM: public EvecInterface<GridPolicies>{ typedef typename GridPolicies::GridFermionField GridFermionField; typedef typename GridPolicies::FgridFclass FgridFclass; BFM_Krylov::Lanczos_5d<double> &eig; bfm_evo<double> &dwf; FgridFclass *latg; double *cps_tmp_d; Fermion_t bq_tmp_bfm; bool singleprec_evecs; int len; GridFermionField *tmp_full; public: EvecInterfaceBFM(BFM_Krylov::Lanczos_5d<double> &_eig, bfm_evo<double> &_dwf, Lattice &lat, const bool _singleprec_evecs): eig(_eig), dwf(_dwf), singleprec_evecs(_singleprec_evecs){ len = 24 * eig.dop.node_cbvol * (1 + dwf.gparity) * eig.dop.cbLs; cps_tmp_d = (double*)malloc(len * sizeof(double)); bq_tmp_bfm = dwf.allocCompactFermion(); assert(lat.Fclass() == GridPolicies::FGRID_CLASS_NAME); assert(dwf.precon_5d == 0); latg = dynamic_cast<FgridFclass*>(&lat); Grid::GridCartesian *FGrid = latg->getFGrid(); tmp_full = new GridFermionField(FGrid); const int gparity = GJP.Gparity(); if(eig.dop.gparity != gparity){ ERR.General("EvecInterfaceBFM","EvecInterfaceBFM","Gparity must be disabled/enabled for *both* CPS and the eigenvectors"); } } Float getEvec(GridFermionField &into, const int idx){ omp_set_num_threads(bfmarg::threads); //Copy bq[i][1] into bq_tmp if(singleprec_evecs){ // eig.bq is in single precision //Upcast the float type to double #pragma omp parallel for for(int j = 0; j < len; j++) { ((double*)bq_tmp_bfm)[j] = ((float*)(eig.bq[idx][1]))[j]; } //Use bfm_evo to convert to a CPS field dwf.cps_impexcbFermion<double>(cps_tmp_d, bq_tmp_bfm, 0, Odd); }else{ // eig.bq is in double precision //Use bfm_evo to convert to a CPS field dwf.cps_impexcbFermion<double>(cps_tmp_d, eig.bq[idx][1], 0, Odd); } //Use Fgrid to convert to a Grid field *tmp_full = Grid::zero; latg->ImportFermion(*tmp_full, (Vector*)cps_tmp_d, FgridBase::Odd); pickCheckerboard(Odd,into,*tmp_full); return eig.evals[idx]; } int nEvecs() const{ return eig.get; } ~EvecInterfaceBFM(){ free(cps_tmp_d); dwf.freeFermion(bq_tmp_bfm); delete tmp_full; } }; #endif #ifdef USE_GRID_LANCZOS template<typename GridPolicies> class EvecInterfaceGrid: public EvecInterface<GridPolicies>{ typedef typename GridPolicies::GridFermionField GridFermionField; const std::vector<Grid::RealD> &eval; const std::vector<GridFermionField> &evec; public: EvecInterfaceGrid(const std::vector<GridFermionField> &_evec, const std::vector<Grid::RealD> &_eval): evec(_evec), eval(_eval){} Float getEvec(GridFermionField &into, const int idx){ into = evec[idx]; return eval[idx]; } int nEvecs() const{ return eval.size(); } }; //Fed to mixed precision solver to improve inner solve guesses using single prec eigenvectors template<typename GridFermionField> class deflateGuess: public Grid::LinearFunction<GridFermionField>{ const std::vector<Grid::RealD> &eval; const std::vector<GridFermionField> &evec; public: deflateGuess(const std::vector<GridFermionField> &_evec, const std::vector<Grid::RealD> &_eval): evec(_evec), eval(_eval){} void operator() (const GridFermionField &src, GridFermionField &sol){ for(int i=0;i<eval.size();i++){ Grid::ComplexD cn = innerProduct(evec[i], src);
axpy(sol, cn / eval[i], evec[i], sol); } } }; template<typename GridPolicies> class EvecInterfaceGridSinglePrec: public EvecInterface<GridPolicies>{ typedef typename GridPolicies::GridFermionField GridFermionField; typedef typename GridPolicies::FgridFclass FgridFclass; typedef typename GridPolicies::GridDirac GridDirac; typedef typename GridPolicies::GridDirac::GaugeField GridGaugeField; typedef typename GridPolicies::GridDiracF GridDiracF; typedef typename GridPolicies::GridFermionFieldF GridFermionFieldF; typedef typename GridPolicies::GridDiracF::GaugeField GridGaugeFieldF; const std::vector<Grid::RealD> &eval; const std::vector<GridFermionFieldF> &evec; Grid::GridRedBlackCartesian * FrbGrid_f; GridGaugeFieldF *Umu_f; GridDiracF* Ddwf_f; Grid::SchurDiagMooeeOperator<GridDiracF,GridFermionFieldF> *Linop_f; bool delete_FrbGrid_f; //if this object news the grid rather than imports it, it must be deleted public: EvecInterfaceGridSinglePrec(const std::vector<GridFermionFieldF> &_evec, const std::vector<Grid::RealD> &_eval, Lattice &lat, const double mass): evec(_evec), eval(_eval), delete_FrbGrid_f(false){ FgridFclass &latg = dynamic_cast<FgridFclass&>(lat); const GridGaugeField & Umu = *latg.getUmu(); //Make a single precision Grid (used by the Mixed prec solver also even if no evecs) std::vector<int> nodes(4); std::vector<int> vol(4); for(int i=0;i<4;i++){ vol[i]= GJP.NodeSites(i)*GJP.Nodes(i);; nodes[i]= GJP.Nodes(i); } Grid::GridCartesian *UGrid_f = Grid::QCD::SpaceTimeGrid::makeFourDimGrid(vol,Grid::GridDefaultSimd(Grid::QCD::Nd,Grid::vComplexF::Nsimd()),nodes); Grid::GridCartesian *FGrid_f = Grid::QCD::SpaceTimeGrid::makeFiveDimGrid(GJP.SnodeSites()*GJP.Snodes(),UGrid_f); Grid::GridRedBlackCartesian *UrbGrid_f = Grid::QCD::SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_f); if(_evec.size() > 0) FrbGrid_f = dynamic_cast<Grid::GridRedBlackCartesian*>(_evec[0]._grid); else{ FrbGrid_f = Grid::QCD::SpaceTimeGrid::makeFiveDimRedBlackGrid(GJP.SnodeSites()*GJP.Snodes(),UGrid_f); delete_FrbGrid_f = true; } Umu_f = new GridGaugeFieldF(UGrid_f); precisionChange(*Umu_f, Umu); const double mob_b = latg.get_mob_b(); const double mob_c = mob_b - 1.; //b-c = 1 const double M5 = GJP.DwfHeight(); typename GridDiracF::ImplParams params; latg.SetParams(params); Ddwf_f = new GridDiracF(*Umu_f,*FGrid_f,*FrbGrid_f,*UGrid_f,*UrbGrid_f,mass,M5,mob_b,mob_c, params); Linop_f = new Grid::SchurDiagMooeeOperator<GridDiracF,GridFermionFieldF>(*Ddwf_f); } ~EvecInterfaceGridSinglePrec(){ delete Umu_f; delete Ddwf_f; delete Linop_f; if(delete_FrbGrid_f) delete FrbGrid_f; } Float getEvec(GridFermionField &into, const int idx){ //get *double precision* eigenvector precisionChange(into,evec[idx]); return eval[idx]; } int nEvecs() const{ return eval.size(); } const std::vector<Grid::RealD> getEvals() const{ return eval; } const std::vector<GridFermionFieldF> getEvecs() const{ return evec; } //Overload high-mode solve to call mixed precision CG with single prec evecs void CGNE_MdagM(Grid::SchurDiagMooeeOperator<GridDirac, GridFermionField> &linop, GridFermionField &solution, const GridFermionField &source, double resid, int max_iters){ deflateGuess<GridFermionFieldF> guesser(evec,eval); Grid::MixedPrecisionConjugateGradient<GridFermionField,GridFermionFieldF> mCG(resid, max_iters, 50, FrbGrid_f, *Linop_f, linop); mCG.useGuesser(guesser); mCG(source,solution); } }; #endif
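The deflateGuess functor above implements the standard low-mode deflation formula, guess = sum_i <e_i, src> e_i / lambda_i, i.e. the exact inverse of the operator restricted to the eigenvector subspace. A plain-vector sketch of the same projection follows; Grid's field and axpy types are replaced by std::vector, and, unlike the functor above, this version zero-initializes the output rather than accumulating into the caller's buffer.

// Deflated starting guess: sol = sum_i <e_i, src> / lambda_i * e_i
#include <complex>
#include <vector>

using Field = std::vector<std::complex<double>>;

std::complex<double> innerProduct(const Field &a, const Field &b) {
  std::complex<double> s = 0;
  for (std::size_t j = 0; j < a.size(); j++) s += std::conj(a[j]) * b[j];
  return s;
}

Field deflatedGuess(const std::vector<Field> &evec,
                    const std::vector<double> &eval, const Field &src) {
  Field sol(src.size(), 0.0);
  for (std::size_t i = 0; i < evec.size(); i++) {
    std::complex<double> cn = innerProduct(evec[i], src);
    for (std::size_t j = 0; j < sol.size(); j++)
      sol[j] += (cn / eval[i]) * evec[i][j]; // axpy: sol += (cn/lambda_i) e_i
  }
  return sol;
}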
IndexHNSWlib.h
#pragma once #include <faiss/Index.h> #include <faiss/hnswlib/hnswlib.h> #include <faiss/impl/FaissAssert.h> namespace faiss { struct IndexHNSWlib : Index { float scale; float bias; IndexHNSWlib(size_t d, MetricType metric) : Index(d, metric), scale(1.0f), bias(0.0f) { } virtual ~IndexHNSWlib() { } virtual void setEFConstruction(size_t ef) = 0; virtual void setEFSearch(size_t ef) = 0; virtual size_t getEfConstruction() = 0; virtual size_t getEfSearch() = 0; virtual void save(FILE* file) const = 0; template <typename Tdist, typename Tcorr> static hnswlib::SpaceInterface<Tdist>* createSpace(size_t d, MetricType metric) { if(metric == METRIC_INNER_PRODUCT) { return new hnswlib::InnerProductSpace<Tdist, Tcorr>(d); } else if(metric == METRIC_L2) { return new hnswlib::L2Space<Tdist, Tcorr>(d); } else { FAISS_THROW_FMT("unsupported metric: %d", metric); } } template <typename T> const T* convertVector(size_t d, const float* x, T) const { T* tx = new T [d]; for(size_t i = 0; i < d; i++) { tx[i] = T(x[i] * scale + bias); } return tx; } template <typename T> void deconvertVector(size_t d, const T* x, float* fx) const{ for(size_t i = 0 ; i < d; i++){ fx[i] = ((float)x[i] - bias) / scale; } } template <typename T> inline void deleteConvertedVector(const T* tx) const { delete[] tx; } inline const float* convertVector(size_t, const float* x, float) const { return x; } // inline void deconvertVector(size_t, const float* x, float* fx) const{ // fx = x; // } inline void deleteConvertedVector(const float*) const { } }; template <typename Tdist, typename Tcorr> struct IndexHNSWlibImpl : IndexHNSWlib { const size_t INIT_MAX_ELEMENTS = 1UL << 20; hnswlib::HierarchicalNSW<Tdist>* hnsw; IndexHNSWlibImpl(size_t d, size_t M, MetricType metric = METRIC_L2) : IndexHNSWlib(d, metric) { hnsw = new hnswlib::HierarchicalNSW<Tdist>(createSpace<Tdist, Tcorr>(d, metric), new hnswlib::VmemLevel0, INIT_MAX_ELEMENTS, M); } IndexHNSWlibImpl(size_t d, FILE* file, MetricType metric = METRIC_L2) : IndexHNSWlib (d, metric) { hnswlib::Level0StorageInterface* storage; char* env_pmem = getenv("USE_PMEM"); if(env_pmem && strcmp(env_pmem, "1") == 0) { storage = new hnswlib::PmemLevel0; } else { storage = new hnswlib::VmemLevel0; } hnsw = new hnswlib::HierarchicalNSW<Tdist>(createSpace<Tdist, Tcorr>(d, metric), storage, file); } ~IndexHNSWlibImpl() { delete hnsw; } void setEFConstruction(size_t ef) override { hnsw->setEfConstruction(ef); } void setEFSearch(size_t ef) override { hnsw->setEfSearch(ef); } size_t getEfConstruction() override{ return hnsw->getEfConstruction(); } size_t getEfSearch() override{ return hnsw->getEfSearch(); } void save(FILE* file) const { hnsw->saveIndex(file); } void add(idx_t n, const float* x) override { add_with_ids(n, x, nullptr); } void add_with_ids(idx_t n, const float* x, const idx_t* xids) override { size_t max_elements = hnsw->getMaxElement(); bool need_resize = false; while(ntotal + n > max_elements) { need_resize = true; max_elements = size_t(max_elements * 1.5); } if(need_resize) { hnsw->resizeIndex(max_elements); } FAISS_ASSERT (ntotal + n <= hnsw->getMaxElement()); #pragma omp parallel for for(idx_t i = 0; i < n; i++) { const float* xi = x + d * i; const Tcorr* txi = convertVector(d, xi, Tcorr()); hnsw->addPoint(txi, xids ? 
xids[i] : ntotal + i); deleteConvertedVector(txi); } ntotal += n; if(verbose) { printf("%ld vectors newly added, %ld in total\n", (long)n, (long)ntotal); } } void reset() override { size_t M = hnsw->getM(); size_t ef_construction = hnsw->getEfConstruction(); size_t ef_search = hnsw->getEfSearch(); delete hnsw; hnsw = new hnswlib::HierarchicalNSW<Tdist>(createSpace<Tdist, Tcorr>(d, metric_type), new hnswlib::VmemLevel0, INIT_MAX_ELEMENTS, M, ef_construction); hnsw->setEfSearch(ef_search); ntotal = 0; } void reconstruct (idx_t key, float* recons) const override{ Tcorr* temp = (Tcorr*)(hnsw->getDataByInternalId(key)); deconvertVector(d, temp, recons); } void search(idx_t n, const float* x, idx_t k, float* distances, idx_t* labels) const override { #pragma omp parallel for for(idx_t i = 0; i < n; i++) { const float* xi = x + i * d; const Tcorr* txi = convertVector(d, xi, Tcorr()); auto topk = hnsw->searchKnn(txi, k); deleteConvertedVector(txi); float* distances_i = distances + (i + 1) * k - 1; idx_t* labels_i = labels + (i + 1) * k - 1; for(idx_t j = 0; j < k; j++) { auto& entry = topk.top(); *distances_i = float(entry.first); *labels_i = entry.second; distances_i--; labels_i--; topk.pop(); } } } }; using IndexHNSWlibFp32 = IndexHNSWlibImpl<float, float>; using IndexHNSWlibBfp16 = IndexHNSWlibImpl<float, bfp16_t>; using IndexHNSWlibInt16 = IndexHNSWlibImpl<int64_t, int16_t>; using IndexHNSWlibInt8 = IndexHNSWlibImpl<int, int8_t>; }
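The convertVector/deconvertVector pair above is plain affine quantization: encode t = T(x * scale + bias), decode x ~ (t - bias) / scale, so the round-trip error is bounded by 1/scale (truncation rather than rounding, matching convertVector above). A self-contained round trip with int16_t storage; the scale and bias values here are illustrative only, not defaults of the class.

// Affine quantization round trip: float -> int16_t -> float
#include <cstdint>
#include <cstdio>

int main() {
  const float scale = 1000.0f, bias = 0.0f; // keep ~3 decimal digits
  const float x[3] = {0.1234f, -0.5f, 0.9999f};
  int16_t t[3];
  float back[3];
  for (int i = 0; i < 3; i++) t[i] = int16_t(x[i] * scale + bias);    // encode
  for (int i = 0; i < 3; i++) back[i] = (float(t[i]) - bias) / scale; // decode
  for (int i = 0; i < 3; i++)
    printf("%f -> %d -> %f\n", x[i], (int) t[i], back[i]); // |x - back| <= 1/scale
  return 0;
}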
Mrpt.h
/******************************************************** * Ville Hyvönen & Teemu Pitkänen * * HIIT / University of Helsinki * * ville.o.hyvonen<at>helsinki.fi * * teemu.pitkanen<at>cs.helsinki.fi * * 2016 * ********************************************************/ #ifndef CPP_MRPT_H_ #define CPP_MRPT_H_ #include <algorithm> #include <functional> #include <numeric> #include <queue> #include <random> #include <string> #include <vector> #include <Eigen/Dense> #include <Eigen/SparseCore> using namespace Eigen; /** * This class defines the elements that are stored in the priority queue for * the extra branch / priority queue trick. An instance of the class describes a * single node in a rp-tree in a single query. The most important field * gap_width tells the difference of the split value used in this node and the * projection of the query vector in this node. This is used as a criterion to * choose extra branches -- a small distance indicates that some neighbors may * easily end up on the other side of split. The rest of the fields are needed * to start a tree traversal from the node "on the other side of the split", * and the methods are needed for sorting in the priority queue. */ class Gap { public: Gap() { } Gap(int tree_, int node_, int level_, double gap_width_) : tree(tree_), node(node_), level(level_), gap_width(gap_width_) { } friend bool operator<(const Gap &a, const Gap &b) { return a.gap_width < b.gap_width; } friend bool operator>(const Gap &a, const Gap &b) { return a.gap_width > b.gap_width; } int tree; // The ordinal of the tree int node; // The node corresponding to the other side of the split int level; // The level in the tree where node lies double gap_width; // The gap between the query projection and split value at the parent of node }; class Mrpt { public: /** * The constructor of the index. The inputs are the data for which the index * will be built and additional parameters that affect the accuracy of the NN * approximation. Concisely, larger n_trees_ or smaller depth values improve * accuracy but slow down the queries. A general rule for the right balance is * not known. The constructor does not actually build the trees, but that is * done by a separate function 'grow' that has to be called before queries can * be made. * @param X_ - Pointer to a matrix containing the data. * @param n_trees_ - The number of trees to be used in the index. * @param depth_ - The depth of the trees. * @param density_ - Expected ratio of non-zero components in a projection matrix. */ Mrpt(const Map<const MatrixXf> *X_, int n_trees_, int depth_, float density_) : X(X_), n_samples(X_->cols()), dim(X_->rows()), n_trees(n_trees_), depth(depth_), density(density_), n_pool(n_trees_ * depth_), n_array(1 << (depth_ + 1)) { } ~Mrpt() {} /** * The function whose call starts the actual index construction. Initializes * arrays to store the tree structures and computes all the projections needed * later. Then repeatedly calls method grow_subtree that builds a single RP-tree. */ void grow() { // generate the random matrix density < 1 ? 
build_sparse_random_matrix() : build_dense_random_matrix(); split_points = MatrixXf(n_array, n_trees); VectorXi indices(n_samples); std::iota(indices.data(), indices.data() + n_samples, 0); tree_leaves = std::vector<std::vector<VectorXi>>(n_trees); #pragma omp parallel for for (int n_tree = 0; n_tree < n_trees; ++n_tree) { MatrixXf tree_projections; if (density < 1) tree_projections.noalias() = sparse_random_matrix.middleRows(n_tree * depth, depth) * *X; else tree_projections.noalias() = dense_random_matrix.middleRows(n_tree * depth, depth) * *X; std::vector<VectorXi> t = grow_subtree(indices, 0, 0, n_tree, tree_projections); tree_leaves[n_tree] = t; } } /** * This function finds the k approximate nearest neighbors of the query object * q. The accuracy of the query depends on both the parameters used for index * construction and additional parameters given to this function. This * function implements two tricks to improve performance. The voting trick * interprets each index object in leaves returned by tree traversals as votes, * and only performs the final linear search with the 'elect' most voted * objects. The priority queue trick keeps track of nodes where the split value * was close to the projection so that we can split the tree traversal to both * subtrees if we want. * @param q - The query object whose neighbors the function finds * @param k - The number of neighbors the user wants the function to return * @param votes_required - The number of votes required for an object to be included in the linear search step * @param branches - The number of extra branches explored in the priority queue trick * @param out - The output buffer * @return */ void query(const Map<VectorXf> &q, int k, int votes_required, int branches, int *out) const { VectorXf projected_query(n_pool); if (density < 1) projected_query.noalias() = sparse_random_matrix * q; else projected_query.noalias() = dense_random_matrix * q; int found_leaves[n_trees]; Gap found_branches[n_trees * depth]; /* * The following loops over all trees, and routes the query to exactly one * leaf in each. */ #pragma omp parallel for for (int n_tree = 0; n_tree < n_trees; ++n_tree) { int idx_tree = 0; for (int d = 0; d < depth; ++d) { const int j = n_tree * depth + d; const int idx_left = 2 * idx_tree + 1; const int idx_right = idx_left + 1; const float split_point = split_points(idx_tree, n_tree); if (projected_query(j) <= split_point) { idx_tree = idx_left; found_branches[n_tree * depth + d] = Gap(n_tree, idx_right, j + 1, split_point - projected_query(j)); } else { idx_tree = idx_right; found_branches[n_tree * depth + d] = Gap(n_tree, idx_left, j + 1, projected_query(j) - split_point); } } found_leaves[n_tree] = idx_tree - (1 << depth) + 1; } int n_elected = 0, max_leaf_size = n_samples / (1 << depth) + 1; VectorXi elected(n_trees * max_leaf_size); VectorXi votes = VectorXi::Zero(n_samples); // count votes for (int n_tree = 0; n_tree < n_trees; ++n_tree) { const VectorXi &idx_one_tree = tree_leaves[n_tree][found_leaves[n_tree]]; const int nn = idx_one_tree.size(), *data = idx_one_tree.data(); for (int i = 0; i < nn; ++i, ++data) { if (++votes(*data) == votes_required) { elected(n_elected++) = *data; } } } /* * The following loop routes the query to extra leaves in the same trees * handled already once above. The extra branches are popped from the * priority queue and routed down the tree just as new root-to-leaf queries. 
*/ std::priority_queue<Gap, std::vector<Gap>, std::greater<Gap>> pq(found_branches, found_branches + n_trees * depth); for (int b = 0; b < branches; ++b) { if (pq.empty()) break; Gap gap(pq.top()); pq.pop(); int j = gap.level; int idx_tree = gap.node; for (; j % depth; ++j) { const int idx_left = 2 * idx_tree + 1; const int idx_right = idx_left + 1; const float split_point = split_points(idx_tree, gap.tree); if (projected_query(j) <= split_point) { idx_tree = idx_left; pq.push(Gap(gap.tree, idx_right, j + 1, split_point - projected_query(j))); } else { idx_tree = idx_right; pq.push(Gap(gap.tree, idx_left, j + 1, projected_query(j) - split_point)); } } const VectorXi &idx_one_tree = tree_leaves[gap.tree][idx_tree - (1 << depth) + 1]; const int nn = idx_one_tree.size(), *data = idx_one_tree.data(); for (int i = 0; i < nn; ++i, ++data) { if (++votes(*data) == votes_required) { elected(n_elected++) = *data; } } } if (n_elected < k) { /* * If not enough samples had at least votes_required * votes, find the maximum amount of votes needed such * that the final search set size has at least k samples */ VectorXf::Index max_index; votes.maxCoeff(&max_index); int max_votes = votes(max_index); VectorXi vote_count = VectorXi::Zero(max_votes + 1); for (int i = 0; i < n_samples; ++i) vote_count(votes(i))++; for (int would_elect = 0; max_votes; --max_votes) { would_elect += vote_count(max_votes); if (would_elect >= k) break; } for (int i = 0; i < n_samples; ++i) { if (votes(i) >= max_votes && votes(i) < votes_required) elected(n_elected++) = i; } } exact_knn(q, k, elected, n_elected, out); } /** * find k nearest neighbors from data for the query point * @param q - query point as a vector * @param k - number of neighbors searched for * @param indices - indices of the points in the original matrix where the search is made * @param out - output buffer * @return */ void exact_knn(const Map<VectorXf> &q, int k, const VectorXi &indices, int n_elected, int *out) const { VectorXf distances(n_elected); #pragma omp parallel for for (int i = 0; i < n_elected; ++i) distances(i) = (X->col(indices(i)) - q).squaredNorm(); if (k == 1) { MatrixXf::Index index; distances.minCoeff(&index); out[0] = indices(index); return; } VectorXi idx(n_elected); std::iota(idx.data(), idx.data() + n_elected, 0); std::nth_element(idx.data(), idx.data() + k, idx.data() + n_elected, [&distances](int i1, int i2) {return distances(i1) < distances(i2);}); for (int i = 0; i < k; ++i) out[i] = indices(idx(i)); } /** * Saves the index to a file. * @param path - Filepath to the output file. * @return True if saving succeeded, false otherwise. 
*/ bool save(const char *path) const { FILE *fd; if ((fd = fopen(path, "wb")) == NULL) return false; fwrite(split_points.data(), sizeof(float), n_array * n_trees, fd); // save tree leaves for (int i = 0; i < n_trees; ++i) { int sz = tree_leaves[i].size(); fwrite(&sz, sizeof(int), 1, fd); for (int j = 0; j < sz; ++j) { int lsz = tree_leaves[i][j].size(); fwrite(&lsz, sizeof(int), 1, fd); fwrite(tree_leaves[i][j].data(), sizeof(int), lsz, fd); } } // save random matrix if (density < 1) { int non_zeros = sparse_random_matrix.nonZeros(); fwrite(&non_zeros, sizeof(int), 1, fd); for (int k = 0; k < sparse_random_matrix.outerSize(); ++k) { for (SparseMatrix<float, RowMajor>::InnerIterator it(sparse_random_matrix, k); it; ++it) { float val = it.value(); int row = it.row(), col = it.col(); fwrite(&row, sizeof(int), 1, fd); fwrite(&col, sizeof(int), 1, fd); fwrite(&val, sizeof(float), 1, fd); } } } else { fwrite(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd); } fclose(fd); return true; } /** * Loads the index from a file. * @param path - Filepath to the index file. * @return True if loading succeeded, false otherwise. */ bool load(const char *path) { FILE *fd; if ((fd = fopen(path, "rb")) == NULL) return false; split_points = MatrixXf(n_array, n_trees); fread(split_points.data(), sizeof(float), n_array * n_trees, fd); // load tree leaves tree_leaves = std::vector<std::vector<VectorXi>>(n_trees); for (int i = 0; i < n_trees; ++i) { int sz; fread(&sz, sizeof(int), 1, fd); std::vector<VectorXi> leaves(sz); for (int j = 0; j < sz; ++j) { int leaf_size; fread(&leaf_size, sizeof(int), 1, fd); VectorXi samples(leaf_size); fread(samples.data(), sizeof(int), leaf_size, fd); leaves[j] = samples; } tree_leaves[i] = leaves; } // load random matrix if (density < 1) { int non_zeros; fread(&non_zeros, sizeof(int), 1, fd); sparse_random_matrix = SparseMatrix<float>(n_pool, dim); std::vector<Triplet<float>> triplets; for (int k = 0; k < non_zeros; ++k) { int row, col; float val; fread(&row, sizeof(int), 1, fd); fread(&col, sizeof(int), 1, fd); fread(&val, sizeof(float), 1, fd); triplets.push_back(Triplet<float>(row, col, val)); } sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end()); sparse_random_matrix.makeCompressed(); } else { dense_random_matrix = Matrix<float, Dynamic, Dynamic, RowMajor>(n_pool, dim); fread(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd); } fclose(fd); return true; } private: /** * Builds a single random projection tree. The tree is constructed by recursively * projecting the data on a random vector and splitting into two by the median. 
* @param indices - The indices left in this branch * @param tree_level - The level in tree where the recursion is at * @param i - The index within the tree where we are at * @param n_tree - The index of the tree within the index * @param tree_projections - Precalculated projection values for the current tree * @return The leaves as a vector of VectorXis */ std::vector<VectorXi> grow_subtree(const VectorXi &indices, int tree_level, int i, int n_tree, const MatrixXf &tree_projections) { int n = indices.size(); int idx_left = 2 * i + 1; int idx_right = idx_left + 1; if (tree_level == depth) { std::vector<VectorXi> v; v.push_back(indices); return v; } VectorXf projections(n); for (int i = 0; i < n; ++i) projections(i) = tree_projections(tree_level, indices(i)); // sort indices of the projections based on their values VectorXi ordered(n); std::iota(ordered.data(), ordered.data() + n, 0); std::sort(ordered.data(), ordered.data() + ordered.size(), [&projections](int i1, int i2) {return projections(i1) < projections(i2);}); int split_point = (n % 2) ? n / 2 : n / 2 - 1; // median split int idx_split_point = ordered(split_point); int idx_split_point2 = ordered(split_point + 1); split_points(i, n_tree) = (n % 2) ? projections(idx_split_point) : (projections(idx_split_point) + projections(idx_split_point2)) / 2; VectorXi left_indices = ordered.head(split_point + 1); VectorXi right_indices = ordered.tail(n - split_point - 1); VectorXi left_elems = VectorXi(left_indices.size()); VectorXi right_elems = VectorXi(right_indices.size()); for (int i = 0; i < left_indices.size(); ++i) left_elems(i) = indices(left_indices(i)); for (int i = 0; i < right_indices.size(); ++i) right_elems(i) = indices(right_indices(i)); std::vector<VectorXi> v = grow_subtree(left_elems, tree_level + 1, idx_left, n_tree, tree_projections); std::vector<VectorXi> w = grow_subtree(right_elems, tree_level + 1, idx_right, n_tree, tree_projections); v.insert(v.end(), w.begin(), w.end()); return v; } /** * Builds a random sparse matrix for use in random projection. The components of * the matrix are drawn from the distribution * * 0 w.p. 1 - a * N(0, 1) w.p. a * * where a = density. */ void build_sparse_random_matrix() { sparse_random_matrix = SparseMatrix<float, RowMajor>(n_pool, dim); std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<float> uni_dist(0, 1); std::normal_distribution<float> norm_dist(0, 1); std::vector<Triplet<float>> triplets; for (int j = 0; j < n_pool; ++j) { for (int i = 0; i < dim; ++i) { if (uni_dist(gen) > density) continue; triplets.push_back(Triplet<float>(j, i, norm_dist(gen))); } } sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end()); sparse_random_matrix.makeCompressed(); } /* * Builds a random dense matrix for use in random projection. The components of * the matrix are drawn from the standard normal distribution. 
*/ void build_dense_random_matrix() { dense_random_matrix = Matrix<float, Dynamic, Dynamic, RowMajor>(n_pool, dim); std::random_device rd; std::mt19937 gen(rd()); std::normal_distribution<float> normal_dist(0, 1); std::generate(dense_random_matrix.data(), dense_random_matrix.data() + n_pool * dim, [&normal_dist, &gen] { return normal_dist(gen); }); } const Map<const MatrixXf> *X; // the data matrix MatrixXf split_points; // all split points in all trees std::vector<std::vector<VectorXi>> tree_leaves; // contains all leaves of all trees, // indexed as tree_leaves[tree number][leaf number][index in leaf] Matrix<float, Dynamic, Dynamic, RowMajor> dense_random_matrix; // random vectors needed for all the RP-trees SparseMatrix<float, RowMajor> sparse_random_matrix; // random vectors needed for all the RP-trees const int n_samples; // sample size of data const int dim; // dimension of data const int n_trees; // number of RP-trees const int depth; // depth of an RP-tree with median split const float density; // expected ratio of non-zero components in a projection matrix const int n_pool; // amount of random vectors needed for all the RP-trees const int n_array; // length of the one RP-tree as array }; #endif // CPP_MRPT_H_
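/* --------------------------------------------------------------------------
 * Usage sketch (added as an illustration, not part of the original header):
 * grow an index over random data and query one point. Columns are data
 * points and rows are dimensions, matching the constructor above; all the
 * parameter values below are placeholder choices.
 * ------------------------------------------------------------------------*/
#if 0
#include <vector>
#include <Eigen/Dense>
int main() {
  const int dim = 16, n_points = 1000;
  Eigen::MatrixXf data = Eigen::MatrixXf::Random(dim, n_points);
  Eigen::Map<const Eigen::MatrixXf> X(data.data(), dim, n_points);
  Mrpt index(&X, /*n_trees*/ 8, /*depth*/ 6, /*density*/ 0.25f);
  index.grow();
  Eigen::VectorXf query_point = Eigen::VectorXf::Random(dim);
  Eigen::Map<Eigen::VectorXf> q(query_point.data(), dim);
  const int k = 10;
  std::vector<int> out(k);
  // 1 vote required, 0 extra branches: plain multi-tree lookup
  index.query(q, k, /*votes_required*/ 1, /*branches*/ 0, out.data());
  return 0;  // out now holds indices of approximate nearest columns of X
}
#endif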
fir_testing2.c
// // max_01.c // // // /usr/local/bin/gcc-8 -fopenmp -std=c11 -march=native -O3 -ffast-math -o fir_testing2.app fir_testing2.c // ./fir_testing2.app // gcc -std=c11 -mavx -mfma -O3 -ffast-math -o fir_testing2.app fir_testing2.c // gcc -std=c11 -march=native -O3 -ffast-math -o fir_testing2.app fir_testing2.c /* x = rand(1,1e7); [b,a] = cheby1(7,3,2000/100000); tic for i = 1:10 y = filter(b,1,x); end toc/10 %How much slower when 'a' is used? %about 2x as long tic for i = 1:10 y = filter(b,a,x); end toc/10 tic for i = 1:10 y = sl.array.mex_filter(b,a,x); end toc/10 */ #include <omp.h> #include <immintrin.h> #include <stdio.h> #include <time.h> #include <stdint.h> #define N_BYTES_SIMD 4 int main() { //data[i] < threshold && data[i+1] >= threshold //Double - about the same time for sparse data //Single - noticeably faster ... int n_samples = 1e8; clock_t clock_begin; clock_t clock_end; double *x = calloc(n_samples,sizeof(double)); double *y = calloc(n_samples,sizeof(double)); int filter_length = 8; double c[8] = {0.123,0.234,0.345,0.456,0.567,0.678,0.789,0.890}; double time_spent_std = 0; for (int i = 0; i <n_samples; i++){ x[i] = i; } clock_begin = clock(); //664 - just as slow as simple loop if (0){ __m256d y0; __m256d c0 = _mm256_set1_pd(c[0]); __m256d c1 = _mm256_set1_pd(c[1]); __m256d c2 = _mm256_set1_pd(c[2]); __m256d c3 = _mm256_set1_pd(c[3]); __m256d c4 = _mm256_set1_pd(c[4]); __m256d c5 = _mm256_set1_pd(c[5]); __m256d c6 = _mm256_set1_pd(c[6]); __m256d c7 = _mm256_set1_pd(c[7]); __m256d t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15; __m256d f0, f1, f2, f3, f4, f5, f6, f7; __m256d r0, r1; __m256d x03, x47, x12, x36, x21, x25, x30, x14, x41, x52, x63, x74; __m256d x0,x1,x2,x3,x4,x5,x6,x7; for (int j = filter_length; j < n_samples; j+=8) { //7 6 5 4 3 2 1 0 -1 -2 -3 -4 -5 -6 -7 // 0 0 0 0. 0 0 0 0 x03 x47 // 1 1 1 1. 1 1 1 1 x12 x36 // 2 2 2 2. 2 2 2 2 x21 x25 // 3 3 3 3. 
3 3 3 3 x30 x14 // 4 4 4 4.4 4 4 4 x41 x03 <= overlap with first entry // 5 5 5 5.5 5 5 5 x52 x12 // 6 6 6 6.6 6 6 6 x63 x21 //7 7 7 7.7 7 7 7 x74 x30 //To collapse we need // 7 6 5 4 3 2 1 0 <= index of final assignment //c0 * 7 6 5 4, 3 2 1 0 x74,x30 //c1 * 6 5 4 3, 2 1 0 -1 x63,x21 t2,t3 //c2 * 5 4 3 2, 1 0 -1 -2 x52 x12 //c3 * 4 3 2 1, 0 -1 -2 -3 x41 x03 t6,t7 //c4 * 3 2 1 0, -1 -2 -3 -4 x30,x14 //c5 * 2 1 0 -1, -2 -3 -4 -5 x21,x25 t10,t11 //c6 * 1 0 -1 -2, -3 -4 -5 -6 x12,x36 //c7 * 0 -1 -2 -3, -4 -5 -6 -7 x03,x47 t14,t15 x74 = _mm256_loadu_pd(&x[j+4]); x63 = _mm256_loadu_pd(&x[j+3]); x52 = _mm256_loadu_pd(&x[j+2]); x41 = _mm256_loadu_pd(&x[j+1]); x30 = _mm256_loadu_pd(&x[j+0]); x21 = _mm256_loadu_pd(&x[j-1]); x12 = _mm256_loadu_pd(&x[j-2]); x03 = _mm256_loadu_pd(&x[j-3]); x14 = _mm256_loadu_pd(&x[j-4]); x25 = _mm256_loadu_pd(&x[j-5]); x36 = _mm256_loadu_pd(&x[j-6]); x47 = _mm256_loadu_pd(&x[j-7]); t0 = _mm256_mul_pd(c0,x74); t1 = _mm256_mul_pd(c0,x30); t2 = _mm256_fmadd_pd(c1,x63,t0); t3 = _mm256_fmadd_pd(c1,x21,t1); t4 = _mm256_mul_pd(c2,x52); t5 = _mm256_mul_pd(c2,x12); t6 = _mm256_fmadd_pd(c3,x41,t4); t7 = _mm256_fmadd_pd(c3,x03,t5); t8 = _mm256_mul_pd(c4,x30); t9 = _mm256_mul_pd(c4,x14); t10 = _mm256_fmadd_pd(c5,x21,t8); t11 = _mm256_fmadd_pd(c5,x25,t9); t12 = _mm256_mul_pd(c6,x12); t13 = _mm256_mul_pd(c6,x36); t14 = _mm256_fmadd_pd(c7,x03,t12); t15 = _mm256_fmadd_pd(c7,x47,t13); f0 = _mm256_add_pd(t3,t7); f1 = _mm256_add_pd(t11,t15); r0 = _mm256_add_pd(f0,f1); _mm256_storeu_pd(&y[j], r0); f2 = _mm256_add_pd(t2,t6); f3 = _mm256_add_pd(t10,t14); r1 = _mm256_add_pd(f2,f3); _mm256_storeu_pd(&y[j+4], r1); /* r1 holds outputs j+4..j+7 */ } } //573 if (0){ __m256d x0,x1,x2,x3,x4,x5,x6,x7; //0 - 0 back __m256d c0 = _mm256_set1_pd(c[0]); __m256d c1 = _mm256_set1_pd(c[1]); __m256d c2 = _mm256_set1_pd(c[2]); __m256d c3 = _mm256_set1_pd(c[3]); __m256d c4 = _mm256_set1_pd(c[4]); __m256d c5 = _mm256_set1_pd(c[5]); __m256d c6 = _mm256_set1_pd(c[6]); __m256d c7 = _mm256_set1_pd(c[7]); __m256d y0, y1, y2, y3, y4, y5, y6, y7; // 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 (round 1) // 3 2 1 0 <= 0 back // 6 5 4 3 <= 1 back // 9 8 7 6 <= 2 back // // // 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 (round 2) // 7 6 5 4 <= 0 back // 10 9 8 7 //sprintf("stop: %d\n",stops[99]); //omp_set_num_threads(1); //#pragma omp parallel for simd int cur_start = 0; y1 = _mm256_loadu_pd(x); //0:3 => becomes y0 on first run y2 = _mm256_loadu_pd(x+4); //4:7 => becomes y1 on first run y3 = _mm256_loadu_pd(x+8); y4 = _mm256_loadu_pd(x+12); y5 = _mm256_loadu_pd(x+16); y6 = _mm256_loadu_pd(x+20); y7 = _mm256_loadu_pd(x+24); for (int j = 28; j < n_samples; j+=4){ //TODO: Can we avoid the awkward loads???? //Perhaps store temporary variables ... //Can we do the shift in the fma stage???? y0 = y1; y1 = y2; y2 = y3; y3 = y4; y4 = y5; y5 = y6; y6 = y7; x0 = _mm256_loadu_pd(&x[j-28-0]); x1 = _mm256_loadu_pd(&x[j-24-1]); x2 = _mm256_loadu_pd(&x[j-20-2]); x3 = _mm256_loadu_pd(&x[j-16-3]); x4 = _mm256_loadu_pd(&x[j-12-4]); x5 = _mm256_loadu_pd(&x[j-8-5]); x6 = _mm256_loadu_pd(&x[j-4-6]); x7 = _mm256_loadu_pd(&x[j-0-7]); y0 = _mm256_fmadd_pd(c0,x0,y0); /* fold in the final c0 tap before the store */ _mm256_storeu_pd(&y[j-28], y0); y1 = _mm256_fmadd_pd(c1,x1,y1); y2 = _mm256_fmadd_pd(c2,x2,y2); y3 = _mm256_fmadd_pd(c3,x3,y3); y4 = _mm256_fmadd_pd(c4,x4,y4); y5 = _mm256_fmadd_pd(c5,x5,y5); y6 = _mm256_fmadd_pd(c6,x6,y6); y7 = _mm256_fmadd_pd(c7,x7,y7); } //TODO: We still have a little remaining at the end to handle ...
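/* (added sketch for the TODO above) finish the tail the simple way: the
   vector loop stores y[j-28] for j = 28,32,..., so up to the last ~28
   outputs are never written; recomputing them with the plain direct-form
   sum is cheap and keeps this experiment self-contained */
for (int j = (n_samples > 32 + filter_length) ? n_samples - 32 : filter_length;
     j < n_samples; j++) {
    double acc = 0.0;
    for (int t = 0; t < filter_length; t++) {
        acc += c[t] * x[j - t];
    }
    y[j] = acc;
}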
} //653 if (1){ //Standard loop approach ---------------------------------------------- #pragma omp parallel for simd for (int j = filter_length; j < n_samples; j++) { y[j] = c[0]*x[j] + c[1]*x[j-1] + c[2]*x[j-2] + c[3]*x[j-3] + c[4]*x[j-4] + c[5]*x[j-5] + c[6]*x[j-6] + c[7]*x[j-7]; } } clock_end = clock(); /* note: clock() measures CPU time summed over all threads, so with OpenMP enabled this overstates elapsed (wall) time */ time_spent_std += (double)(clock_end - clock_begin) / CLOCKS_PER_SEC; printf("time (ms): %g\n",1000*time_spent_std); printf("y_last: %g\n",y[n_samples-1]); }
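/* (added, hedged) wall-clock timing variant: since clock() sums CPU time
   across OpenMP threads, omp_get_wtime() gives a more representative
   elapsed-time figure for the parallel loop */
#if 0
double t0 = omp_get_wtime();
/* ... run the filter loop ... */
double t1 = omp_get_wtime();
printf("wall time (ms): %g\n", 1000.0 * (t1 - t0));
#endif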
example3.c
#include "emf_mie_mmls.h" #include <sys/stat.h> #include <errno.h> #include <png.h> typedef struct image_data{ char dir_name[64]; // directory name to output image int scale; // number for enlarge the output image int m; // sampling number double rang; // range of sampling int ts; // time step per cycle double complex *ve,*vh; // electromagnetic field data double me[3],mh[3]; // maximum amplitude of each field component }IMD; void directory_name(char *src,char *nn); void make_directory(char *dir_name); void eh_field_x(IMD *id,MSPD *sp); void eh_field_y(IMD *id,MSPD *sp); void eh_field_z(IMD *id,MSPD *sp); void output_field(char *pl,IMD *id,MSPD *sp); // color table png_byte ct1[9][3]={{0x00,0x00,0x90},{0x00,0x0f,0xff},{0x00,0x90,0xff},{0x0f,0xff,0xee}, {0xff,0xff,0xff},{0xff,0xee,0x00},{0xff,0x70,0x00},{0xee,0x00,0x00},{0x7f,0x00,0x00}}; /* png_byte ct1[9][3]={{0x00,0x00,0x90},{0x00,0x0f,0xff},{0x00,0x90,0xff},{0x0f,0xff,0xee}, {0x90,0xff,0x70},{0xff,0xee,0x00},{0xff,0x70,0x00},{0xee,0x00,0x00},{0x7f,0x00,0x00}}; */ int main(int argc,char *argv[]) { MSPD msp; IMD id; double mf; int sn; if(argc!=2 && argc!=4){ printf("Usage : %s datafile_name [sampling_number multplier_factor](optional)\n",argv[0]); printf("default sampling number 200, multiplier factor 4 (range is -4*lambda0 to 4*lambda0)\n"); exit(0); } else if(argc==4){ sn=atoi(argv[2]); mf=atof(argv[3]); } else{ sn=200; mf=4.0; } read_dat_mmls(argv[1],&msp); // read data file print_data_mmls(&msp); // print data directory_name(argv[1],id.dir_name); // remove file-extension from argv[1] and add "_images" id.scale=1; // number for enlarge the output image id.m=sn; // sampling number id.rang=mf*msp.bm.lambda_0; // range of sampling id.ts=40; // time step per cycle make_directory(id.dir_name); id.ve=(double complex *)m_alloc2(id.m*id.m*3,sizeof(double complex),"example3.c, ve"); id.vh=(double complex *)m_alloc2(id.m*id.m*3,sizeof(double complex),"example3.c, vh"); // x=0 plane eh_field_x(&id,&msp); output_field("yz",&id,&msp); // y=0 plane eh_field_y(&id,&msp); output_field("xz",&id,&msp); // z=0 plane eh_field_z(&id,&msp); output_field("xy",&id,&msp); free(id.ve); free(id.vh); free_mmls(&msp); return 0; } void directory_name(char *src,char *nn) { int s1,s2; char *sd,fo[64]={},buf[54]={}; s1=strlen(src); if(s1>54){ printf("example3.c, directory_name(), directory name is too long. exit...\n"); exit(1); } sprintf(fo,"%s",src); sd=strrchr(fo,'.'); if(sd!=NULL){ s2=strlen(sd); strncpy(buf,src,s1-s2); sprintf(fo,"%s_images",buf); } sprintf(nn,"%s",fo); } void make_directory(char *dir_name) { int ret; ret=mkdir(dir_name,S_IRWXU|S_IRWXG); if(ret!=0 && errno!=EEXIST){ printf("failed to make directory. 
Exit.."); exit(1); } } void eh_field_x(IMD *id,MSPD *sp) { double complex e[3],h[3]; double x[3],dr; int i,j,d; dr=id->rang*2.0/(double)(id->m-1); for(i=0;i<3;i++){ id->me[i]=0.0; id->mh[i]=0.0; } // x=0 plane x[0]=0.0; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(j,d,e,h) for(i=0;i<id->m;i++){ x[2]=id->rang-(double)i*dr; for(j=0;j<id->m;j++){ x[1]=-id->rang+(double)j*dr; total_EH_mmls(e,h,x,sp); // total field #pragma omp critical for(d=0;d<3;d++){ if(cabs(e[d])>id->me[d]) id->me[d]=cabs(e[d]); if(cabs(h[d])>id->mh[d]) id->mh[d]=cabs(h[d]); } for(d=0;d<3;d++){ id->ve[i*id->m*3+j*3+d]=e[d]; id->vh[i*id->m*3+j*3+d]=h[d]; } } } } void eh_field_y(IMD *id,MSPD *sp) { double complex e[3],h[3]; double x[3],dr; int i,j,d; dr=id->rang*2.0/(double)(id->m-1); for(i=0;i<3;i++){ id->me[i]=0.0; id->mh[i]=0.0; } // y=0 plane x[1]=0.0; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(j,d,e,h) for(i=0;i<id->m;i++){ x[2]=id->rang-(double)i*dr; for(j=0;j<id->m;j++){ x[0]=-id->rang+(double)j*dr; total_EH_mmls(e,h,x,sp); // total field #pragma omp critical for(d=0;d<3;d++){ if(cabs(e[d])>id->me[d]) id->me[d]=cabs(e[d]); if(cabs(h[d])>id->mh[d]) id->mh[d]=cabs(h[d]); } for(d=0;d<3;d++){ id->ve[i*id->m*3+j*3+d]=e[d]; id->vh[i*id->m*3+j*3+d]=h[d]; } } } } void eh_field_z(IMD *id,MSPD *sp) { double complex e[3],h[3]; double x[3],dr; int i,j,d; dr=id->rang*2.0/(double)(id->m-1); for(i=0;i<3;i++){ id->me[i]=0.0; id->mh[i]=0.0; } // z=0 plane x[2]=0.0; #pragma omp parallel for schedule(dynamic) firstprivate(x) private(j,d,e,h) for(i=0;i<id->m;i++){ x[1]=id->rang-(double)i*dr; for(j=0;j<id->m;j++){ x[0]=-id->rang+(double)j*dr; total_EH_mmls(e,h,x,sp); // total field #pragma omp critical for(d=0;d<3;d++){ if(cabs(e[d])>id->me[d]) id->me[d]=cabs(e[d]); if(cabs(h[d])>id->mh[d]) id->mh[d]=cabs(h[d]); } for(d=0;d<3;d++){ id->ve[i*id->m*3+j*3+d]=e[d]; id->vh[i*id->m*3+j*3+d]=h[d]; } } } } void output_field(char *pl,IMD *id,MSPD *sp) { void output_png(int nt,double complex cet,char *pl,IMD *id); void output_color_bar(IMD *id); FILE *fp; char fn[128]; double dt; int n; dt=sp->bm.lambda_0/(double)id->ts; #pragma omp parallel for schedule(dynamic) for(n=0;n<id->ts;n++){ output_png(n,cexp(-I*sp->bm.omega*dt*(double)n),pl,id); } // print info sprintf(fn,"%s/%s_info.txt",id->dir_name,pl); fp=fopen(fn,"wt"); if(fp==NULL){ printf("Failed to open the %s file. 
Exit...\n",fn); exit(1); } fprintf(fp,"the range of color bar\n"); fprintf(fp,"Ex is %8e to %8e\n",-id->me[0],id->me[0]); fprintf(fp,"Ey is %8e to %8e\n",-id->me[1],id->me[1]); fprintf(fp,"Ez is %8e to %8e\n",-id->me[2],id->me[2]); fprintf(fp,"Hx is %8e to %8e\n",-id->mh[0],id->mh[0]); fprintf(fp,"Hy is %8e to %8e\n",-id->mh[1],id->mh[1]); fprintf(fp,"Hz is %8e to %8e\n",-id->mh[2],id->mh[2]); fclose(fp); // output color bar image output_color_bar(id); } void output_png(int nt,double complex cet,char *pl,IMD *id) { int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b); // -1 <= x <= 1 FILE *fep[3],*fhp[3]; char fname[256],sf[3]={'x','y','z'}; int j,i,sj,si,d,m,scale; png_uint_32 width,height; png_structp png_e[3],png_h[3]; png_infop info_e[3],info_h[3]; png_bytepp pd_e[3],pd_h[3]; png_byte r,g,b; m=id->m; scale=id->scale; width =m*(scale+1); height=m*(scale+1); for(d=0;d<3;d++){ png_e[d] =png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); info_e[d]=png_create_info_struct(png_e[d]); sprintf(fname,"%s/%s_E%c_%03d.png",id->dir_name,pl,sf[d],nt); fep[d]=fopen(fname,"wb"); if(fep[d]==NULL){ printf("Failed to open the %s file. Exit...\n",fname); exit(1); } png_h[d] =png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); info_h[d]=png_create_info_struct(png_h[d]); sprintf(fname,"%s/%s_H%c_%03d.png",id->dir_name,pl,sf[d],nt); fhp[d]=fopen(fname,"wb"); if(fhp[d]==NULL){ printf("Failed to open the %s file. Exit...\n",fname); exit(1); } png_init_io(png_e[d],fep[d]); png_set_IHDR(png_e[d],info_e[d],width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT); pd_e[d]=(png_bytepp)png_malloc(png_e[d],sizeof(png_bytep)*height); png_set_rows(png_e[d],info_e[d],pd_e[d]); png_init_io(png_h[d],fhp[d]); png_set_IHDR(png_h[d],info_h[d],width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT); pd_h[d]=(png_bytepp)png_malloc(png_h[d],sizeof(png_bytep)*height); png_set_rows(png_h[d],info_h[d],pd_h[d]); for(j=0;j<height;j++){ pd_e[d][j]=(png_bytep)png_malloc(png_e[d],sizeof(png_byte)*width*3); pd_h[d][j]=(png_bytep)png_malloc(png_h[d],sizeof(png_byte)*width*3); } } for(i=0;i<m;i++){ for(j=0;j<m;j++){ for(d=0;d<3;d++){ color_rgb(creal(cet*id->ve[i*m*3+j*3+d])/id->me[d],&r,&g,&b); for(si=0;si<=scale;si++){ for(sj=0;sj<=scale;sj++){ pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+0]=r; pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+1]=g; pd_e[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+2]=b; } } color_rgb(creal(cet*id->vh[i*m*3+j*3+d])/id->mh[d],&r,&g,&b); for(si=0;si<=scale;si++){ for(sj=0;sj<=scale;sj++){ pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+0]=r; pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+1]=g; pd_h[d][i*(scale+1)+si][(j*(scale+1)+sj)*3+2]=b; } } } } } for(d=0;d<3;d++){ png_write_png(png_e[d],info_e[d],PNG_TRANSFORM_IDENTITY,NULL); png_write_png(png_h[d],info_h[d],PNG_TRANSFORM_IDENTITY,NULL); for(j=0;j<height;j++){ png_free(png_e[d],pd_e[d][j]); png_free(png_h[d],pd_h[d][j]); } png_free(png_e[d],pd_e[d]); png_free(png_h[d],pd_h[d]); fclose(fep[d]); fclose(fhp[d]); } } void output_color_bar(IMD *id) { int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b); // -1 <= x <= 1 FILE *fp; char fname[128]; int j,i; png_uint_32 width,height; png_structp png; png_infop info; png_bytepp pdata; png_byte r,g,b; sprintf(fname,"%s/color_bar.png",id->dir_name); height=id->m*(id->scale+1); width=height/16; png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); info= 
png_create_info_struct(png); fp=fopen(fname,"wb"); if(fp==NULL){ printf("Failed to open the %s file. Exit...\n",fname); exit(1); } png_init_io(png, fp); png_set_IHDR(png,info,width,height,8,PNG_COLOR_TYPE_RGB,PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT,PNG_FILTER_TYPE_DEFAULT); pdata=(png_bytepp)png_malloc(png, sizeof(png_bytep)*height); png_set_rows(png,info,pdata); for(j=0;j<height;j++){ pdata[j]=(png_bytep)png_malloc(png,sizeof(png_byte)*width*3); } for(i=0;i<height;i++){ color_rgb(1.0-(2.0/(double)height)*(double)i,&r,&g,&b); for(j=0;j<width;j++){ pdata[i][j*3+0]=r; pdata[i][j*3+1]=g; pdata[i][j*3+2]=b; } } png_write_png(png, info, PNG_TRANSFORM_IDENTITY, NULL); for(j=0;j<height;j++){ png_free(png,pdata[j]); } png_free(png,pdata); fclose(fp); } int color_rgb(double x,png_byte *r,png_byte *g,png_byte *b) // -1 <= x <= 1 { double i_nc,dr,dg,db; unsigned int i,n,nc,nd; if(x<-1.0 || x>1.0){ *r=0x00; *g=0x00; *b=0x00; return -1; } n=(unsigned int)floor(pow(2,23)*(x+1.0)); nc=(unsigned int)pow(2,21); i_nc=1.0/(double)nc; if(n<nc*1) i=1; else if(n<nc*2) i=2; else if(n<nc*3) i=3; else if(n<nc*4) i=4; else if(n<nc*5) i=5; else if(n<nc*6) i=6; else if(n<nc*7) i=7; else if(n<nc*8) i=8; else { *r=ct1[8][0]; *g=ct1[8][1]; *b=ct1[8][2]; return 0; } nd=n-nc*(i-1); dr=(double)(ct1[i][0]-ct1[i-1][0])*i_nc; dg=(double)(ct1[i][1]-ct1[i-1][1])*i_nc; db=(double)(ct1[i][2]-ct1[i-1][2])*i_nc; *r=(png_byte)floor((double)ct1[i-1][0]+dr*(double)nd); *g=(png_byte)floor((double)ct1[i-1][1]+dg*(double)nd); *b=(png_byte)floor((double)ct1[i-1][2]+db*(double)nd); return 0; }
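/* --------------------------------------------------------------------------
   Quick check of the color map above (added illustration): x = -1, 0, 1
   should land on the first, middle (white) and last entries of ct1.
   Compile this block standalone against color_rgb() to try it. */
#if 0
int main(void)
{
  png_byte r,g,b;
  double xs[3]={-1.0,0.0,1.0};
  int i;
  for(i=0;i<3;i++){
    color_rgb(xs[i],&r,&g,&b);
    printf("x=% .1f -> #%02x%02x%02x\n",xs[i],r,g,b);
  }
  return 0;
}
#endif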
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx = 66, Ny = 66, Nz = 66, Nt = 100; /* fallback defaults (arbitrary small problem) so the sizes and step count are defined even when fewer command-line arguments are given */ if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); /* initialize the whole grid, including the halo cells the stencil reads; give the second time plane a defined value as well */ for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { for (k = 0; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); A[1][i][j][k] = 0.0; } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(8*t1+Ny+13,16)),floord(16*t2+Ny+12,16)),floord(16*t1-16*t2+Nz+Ny+11,16));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1020,1024)),ceild(16*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(8*t1+Nx+13,1024)),floord(16*t2+Nx+12,1024)),floord(16*t3+Nx+12,1024)),floord(16*t1-16*t2+Nz+Nx+11,1024));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),16*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),16*t3+14),1024*t4+1022),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(1024*t4,t5+1); ubv=min(1024*t4+1023,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
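/* (added) untiled reference for the stencil above -- the same 7-point
   update the CLooG kernel performs, written as a plain loop nest; useful
   for validating the tiled version on small sizes. Assumes the A, alpha,
   beta and Nx/Ny/Nz/Nt names from main(). */
#if 0
void reference_3d7pt(double ****A, int Nt, int Nz, int Ny, int Nx,
                     double alpha, double beta)
{
  int t, i, j, k;
  for (t = 0; t < Nt - 1; t++)
    for (i = 1; i < Nz - 1; i++)
      for (j = 1; j < Ny - 1; j++)
        for (k = 1; k < Nx - 1; k++)
          A[(t + 1) % 2][i][j][k] =
              alpha * A[t % 2][i][j][k]
            + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k]
                    + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k]
                    + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
}
#endif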
papi_cntr.h
/** Copyright (c) 2012, Swiss National Supercomputing Center (CSCS) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the Swiss National Supercomputing Center (CSCS) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PAPI_COUNTER_H #define PAPI_COUNTER_H #include <papi.h> #ifdef _OPENMP #include <omp.h> #endif #include <cstdlib> #include <cstring> #include <cassert> #include <cmath> #include <vector> #include <map> #include <algorithm> #include <string> #include <iostream> #include <iomanip> #include <fstream> #include <sstream> /** * @enum DerivedStatistics * @brief Derived PAPI statistics */ enum DerivedStatistics{ Derived_FLIPS, Derived_FLOPS, Derived_DP_vector_FLOPS, Derived_SP_vector_FLOPS, Derived_L1_DMR, Derived_L2_DMR, Derived_L1_TMR, Derived_L2_TMR, Derived_L3_TMR, Derived_Mem_Bandwidth, Derived_BANDWIDTH_SS, Derived_BANDWIDTH_DS }; /** * Get sum of the vector * @param [in] v - input vector * @return sum of the vector elements */ template <typename T> T VectorSum(std::vector<T> const &v) { T sum = T(); for(size_t i=0; i<v.size(); i++) { sum += v[i]; } return sum; } /** * Get mean value of the vector * @param v * @return */ template <typename T> T VectorMean(std::vector<T> const &v) { return VectorSum(v)/(T)v.size(); } /** * Write vector in Matlab format * @param fid - file to write into * @param name - Name of the vector (measured routine) * @param v - input vector */ template <typename TVec> void writeVecMatlab(std::ofstream &fid, const std::string & name, TVec const &v) { fid << name << " = ["; for(size_t i=0; i<v.size(); i++) { fid << v[i] << (i<(v.size()-1) ? 
" " : "];"); } fid << std::endl; } /** * @enum PapiFileFormat * @brief Enumerate the different output formats for counter information \n * LaTex support not currently implemented */ enum PapiFileFormat {FileFormatMatlab, FileFormatPlain, FileFormatLaTeX}; /** * @class Papi * @brief singleton that handles papi intialisation and access to papi calls */ class Papi { public: /// Get papi class instance static Papi* Instance(); /// destructor ~Papi(); /// Initialise papi void Init(); /// Get Numbero of PAPI events inline int GetNumberOfEvents() const { return eventNames.size(); }; /// Get event name std::string const &GetEventName(const int eventIndex) const { assert(eventIndex<GetNumberOfEvents()); return eventNames[eventIndex]; }; /// Get event number int GetEventNumber(const int eventIndex) const { assert(eventIndex<GetNumberOfEvents()); return events[eventIndex]; }; /// Get counter for a given thread long long GetCounter(const int threadIdx, const int counterIndex) const { assert(counterIndex<GetNumberOfEvents()); assert(threadIdx<GetNumThreads()); return hwCounterValues[threadIdx][counterIndex]; }; /// Get time for given thread double GetTime(int ThreadIdx) const { assert(ThreadIdx<GetNumThreads()); return threadTime[ThreadIdx]; }; /// Start PAPI counters void StartCounters(); /// Stop PAPI counters void StopCounters(); /// Get number of threads int GetNumThreads() const { return numThreads; }; /// Is counting? bool IsCounting() const { return counting; }; private: /// Default constructor Papi() : setup(false), debug(false), counting(false) {}; /// COPY constructor Papi(Papi const &) {}; /// Print papi error void papi_print_error(const int papiErrorCode) const; bool setup; bool debug; bool counting; int eventSet; int numThreads; std::vector<std::string> eventNames; std::vector<int> events; std::vector<double> threadTime; /// actual counter HW counter values std::vector<std::vector<long long> > hwCounterValues; static Papi* instance; }; /** * @class PapiCounter * @brief Class with counters for given routine */ class PapiCounter { public: /// Constructor PapiCounter(); /// Start counters void Start(); /// Stop counters void Stop(); /// Write to file void WriteToStream(std::string const &routineName, int eventId, std::ofstream &stream, const PapiFileFormat fileFormat); /// Get name std::string GetName(const int i) const { return names[i]; }; /// Get number int GetNumber(const int i) const { return numbers[i]; }; /// Get count long long GetValue(const int threadIdx, const int i) const { assert(threadIdx < GetNumThreads()); return counterValues[threadIdx][i]; }; /// Get time for thread double GetTime(const int threadIdx) const { assert(threadIdx < GetNumThreads()); return times[threadIdx]; }; /// Get aggregated time double GetTime() const { return VectorMean(times); }; /// Get number of counters int GetNumCounters() const { return names.size(); }; /// Get number of threads int GetNumThreads() const { return Papi::Instance()->GetNumThreads(); }; /// Get counters across all threads long long GetAggregaterdCounterValuesOverAllThreads(const int i) const; /// print Screen void PrintScreen(); std::vector<long long> GetIndividualValues(const int i) const; private: /// Is derived statistics available bool IsDerivedStatAvailable(const DerivedStatistics statIdx) const; /// Compute derived statistics std::vector<double> ComputederivedStat(const DerivedStatistics statIdx); std::vector<std::string> names; std::vector<int> numbers; std::vector<double> times; /// counters for a given routine over multiple 
invocations std::vector<std::vector<long long> > counterValues; }; /** * @class PapiCounterList * @brief class to manage all events that we want to benchmark \n * essentially a wrapper around map<string, PapiCoutner> where the string \n * is the routine name or a named code section */ class PapiCounterList { public: /// constructor PapiCounterList() { }; /// write to stream void WriteToFile(const std::string fileName, const PapiFileFormat fileFormat = FileFormatPlain); /// write to stream void WriteToFile(std::ofstream &fstream, const PapiFileFormat fileFormat = FileFormatPlain); ///print to screen void PrintScreen(); /// add routine void AddRoutine(const std::string routineName); /// Routine PapiCounter& Routine(const std::string routineName); /// override [] to allow access to events using ["eventName"] PapiCounter& operator[] (std::string &routineName) { return Routine(routineName); }; /// operator [] PapiCounter& operator[] (std::string routineName) { return Routine(routineName); }; private: std::map<std::string, PapiCounter> routineEvents; }; /////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////// //// REPLACEMENT FOR CPP SOURCE FILE /////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////// std::string derivedStatName(DerivedStatistics statIDX){ switch(statIDX){ case Derived_FLIPS: return std::string("derived_FLIPS"); break; case Derived_FLOPS: return std::string("derived_FLOPS"); break; case Derived_DP_vector_FLOPS: return std::string("derived_DP_vector_FLOPS"); break; case Derived_SP_vector_FLOPS: return std::string("derived_SP_vector_FLOPS"); break; case Derived_L1_DMR: return std::string("derived_L1_DMR"); break; case Derived_L2_DMR: return std::string("derived_L2_DMR"); break; case Derived_L1_TMR: return std::string("derived_L1_TMR"); break; case Derived_L2_TMR: return std::string("derived_L2_TMR"); break; case Derived_L3_TMR: return std::string("derived_L3_TMR"); break; case Derived_Mem_Bandwidth: return std::string("Derived_Mem_Bandwidth"); break; case Derived_BANDWIDTH_SS: return std::string("derived_BANDWIDTH_SS"); break; case Derived_BANDWIDTH_DS: return std::string("derived_BANDWIDTH_DS"); break; } return std::string(""); } int findString(std::vector<std::string> const& strVec, std::string str){ std::vector<std::string>::const_iterator it; it = std::find(strVec.begin(), strVec.end(), str); // return -1 if str not found in strVec if(it==strVec.end()) return -1; return it - strVec.begin(); } //============================================================================== // PAPI //============================================================================== /// initialise Papi instance Papi* Papi::instance = NULL; /** * Get papi class instance * @return Papi instance */ Papi* Papi::Instance() { return instance ? 
instance : (instance = new Papi); } /** * Initialise papi */ void Papi::Init() { // only initialise if not already initialised if (setup) { return; } int papiError; // set debugging if requested by environment variable char *debugStr = getenv("PAPI_DEBUG"); debug = (debugStr != NULL); if (debug) { std::cerr << "Papi debug mode on" << std::endl; } // Initialise the papi library */ papiError = PAPI_library_init(PAPI_VER_CURRENT); if (papiError != PAPI_VER_CURRENT) { std::cerr << "PAPI library init error!" << std::endl; exit (1); } #ifdef _OPENMP // assume fixed thread affinity, otherwise this approach fails papiError = PAPI_thread_init((long unsigned int (*)()) omp_get_thread_num); if (papiError != PAPI_OK) { std::cerr << "Could not initialize the library with openmp." << std::endl; exit (1); } numThreads = omp_get_max_threads(); #else numThreads = 1; #endif threadTime.resize(numThreads); // determine the number of hardware counters int numHWCounters; papiError = numHWCounters = PAPI_num_counters(); if (papiError <= PAPI_OK) { std::cerr << "PAPI error : unable to determine number of hardware counters" << std::endl; papi_print_error (papiError); exit (1); } if (debug) { std::cout << "There are " << numHWCounters << " hardware counters available" << std::endl; } // get user-defined list of hardware counters from environment variable char *papiCounters = getenv("PAPI_EVENTS"); if (debug) { std::cout << "PAPI_EVENTS = " << papiCounters << std::endl; } char *result = NULL; char delim[] = "|"; if (papiCounters == NULL) { result = NULL; } else { result = strtok(papiCounters, delim); } while (result != NULL) { int eventID; papiError = PAPI_event_name_to_code(result, &eventID); if (papiError == PAPI_OK && std::find(events.begin(), events.end(), eventID) == events.end()) { eventNames.push_back(std::string(result)); events.push_back(eventID); } else { std::cerr << "Papi Error : not adding event : " << result << std::endl; } result = strtok(NULL, delim); } if (debug) { std::cout << "there are " << eventNames.size() << " requested counters" << std::endl; } if (GetNumberOfEvents() == 0) { setup = true; return; } if (GetNumberOfEvents() > 127) { std::cerr << "Too many events selected : exiting" << std::endl; exit(-1); } eventSet = PAPI_NULL; papiError = PAPI_create_eventset(&eventSet); if (papiError != PAPI_OK) { std::cerr << "Papi error : Could not create the EventSet" << std::endl; papi_print_error(papiError); exit(-1); } if (debug) { for (int i = 0; i < GetNumberOfEvents(); i++) std::cerr << "Event " << i << " out of " << GetNumberOfEvents() << " = " << GetEventName(i) << std::endl; } // allocate space for counters hwCounterValues.resize(numThreads); for (int i = 0; i < numThreads; i++) { hwCounterValues[i].resize(GetNumberOfEvents()); } setup = true; } /** * Print PAPI error * @param [in] papiErrorCode */ void Papi::papi_print_error(const int papiErrorCode) const { char * errString = PAPI_strerror(papiErrorCode); std::cerr << "PAPI error : " << errString << std::endl; } /** * Start PAPI counters */ void Papi::StartCounters() { if (!setup) { Init(); } if (IsCounting()) { std::cerr << "PAPI counters error : cannot start papi counters when they are already running" << std::endl; exit(-1); } #ifdef _OPENMP #pragma omp parallel #endif { if (GetNumberOfEvents()) { int papiError = PAPI_start_counters(&events[0], events.size()); if (papiError != PAPI_OK) { std::cerr << "PAPI error : unable to start counters" << std::endl; papi_print_error(papiError); exit(-1); } } #ifdef _OPENMP int threadIndex = 
omp_get_thread_num(); double timeTmp = omp_get_wtime(); #else int threadIndex = 0; double timeTmp = PAPI_get_virt_usec() / 1e6; #endif threadTime[threadIndex] = -timeTmp; } counting = true; } /** * Stop PAPI counters */ void Papi::StopCounters() { if (!IsCounting()) { std::cerr << "PAPI counters error : cannot stop papi counters that have not been started" << std::endl; exit(-1); } #ifdef _OPENMP #pragma omp parallel #endif { #ifdef _OPENMP int threadIndex = omp_get_thread_num(); #else int threadIndex = 0; #endif if (GetNumberOfEvents()) { int papiError = PAPI_stop_counters(&hwCounterValues[threadIndex][0], events.size()); if (papiError != PAPI_OK) { std::cerr << "PAPI error : unable to stop counters" << std::endl; papi_print_error(papiError); exit(-1); } } #ifdef _OPENMP threadTime[threadIndex] += omp_get_wtime(); #else threadTime[threadIndex] += (PAPI_get_virt_usec() / 1e6); #endif } counting = false; } //============================================================================== // PapiCounter //============================================================================== /** * Constructor */ PapiCounter::PapiCounter() { Papi::Instance()->Init(); const int numCounters = Papi::Instance()->GetNumberOfEvents(); const int numThreads = Papi::Instance()->GetNumThreads(); for (int i = 0; i < numCounters; i++) { names.push_back(Papi::Instance()->GetEventName(i)); numbers.push_back(Papi::Instance()->GetEventNumber(i)); } counterValues.resize(numThreads); for (int tid = 0; tid < numThreads; tid++) { counterValues[tid].resize(numCounters, 0LL); } times.resize(numThreads); } /** * Stop counters (accumulate) */ void PapiCounter::Stop() { Papi::Instance()->StopCounters(); const int numCounters = Papi::Instance()->GetNumberOfEvents(); const int numThreads = Papi::Instance()->GetNumThreads(); for (int tid = 0; tid < numThreads; tid++) { for (int i = 0; i < numCounters; i++) { counterValues[tid][i] += Papi::Instance()->GetCounter(tid, i); } times[tid] += Papi::Instance()->GetTime(tid); } } /** * Start counters */ void PapiCounter::Start() { Papi::Instance()->StartCounters(); } /** * Get aggregated values over all threads * @param i - index of the counter * @return value for the counter over all threads */ long long PapiCounter::GetAggregaterdCounterValuesOverAllThreads(const int i) const { assert(i < GetNumCounters()); long long sum = 0LL; for (int tid = 0; tid < GetNumThreads(); tid++) { sum += GetValue(tid, i); //return sum/(long long)threads(); } return sum; } /** * Get counter values for each thread * @param i - counter index * @return vector with individual thread values for the counter */ std::vector<long long> PapiCounter::GetIndividualValues(const int i) const { assert(i < GetNumCounters()); std::vector<long long> tmp; for (int tid = 0; tid < GetNumThreads(); tid++) { tmp.push_back(GetValue(tid, i)); } return tmp; } /** * Write counter to file * @param routineName * @param eventId * @param stream * @param fileFormat */ void PapiCounter::WriteToStream(std::string const &routineName, int eventId, std::ofstream &stream, PapiFileFormat fileFormat) { if (GetNumCounters()) { int numThreads = Papi::Instance()->GetNumThreads(); switch (fileFormat) { case FileFormatPlain: stream << "----------------------------" << std::endl; stream << routineName << " :: wall time " << GetTime() << " s" << std::endl; stream << "----------------------------" << std::endl; if (Papi::Instance()->GetNumThreads() > 1) { for (int tid = 0; tid < Papi::Instance()->GetNumThreads(); tid++) { stream << " THREAD" <<
std::setw(2) << tid; } } stream << " [ TOTAL ]" << std::endl; for (int i = 0; i < GetNumCounters(); i++) { if (Papi::Instance()->GetNumThreads() > 1) { for (int tid = 0; tid < Papi::Instance()->GetNumThreads(); tid++) { stream << " " << std::setw(12) << GetValue(tid, i); } } stream << " [ " << std::setw(12) << GetAggregaterdCounterValuesOverAllThreads(i) << " ]" << "\t" << GetName(i) << std::endl; } /* for(int i in derivedCounters<platform>::counters){ if( derivedCounters<platform>::counters[i].is_available() ) compute; write to screen; } */ if (IsDerivedStatAvailable(Derived_FLIPS)) { std::vector<double> stat = ComputederivedStat(Derived_FLIPS); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6); } } stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6)<< " ]" << "\tderived_FLIPS (MFLIPS)" << std::endl; } if (IsDerivedStatAvailable(Derived_FLOPS)) { std::vector<double> stat = ComputederivedStat(Derived_FLOPS); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6); } } stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6)<< " ]" << "\tderived_FLOPS (MFLOPS)" << std::endl; } if (IsDerivedStatAvailable(Derived_DP_vector_FLOPS)) { std::vector<double> stat = ComputederivedStat(Derived_DP_vector_FLOPS); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6); } } stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6)<< " ]" << "\tderived_DP_vector_FLOPS (MFLOPS)" << std::endl; } if (IsDerivedStatAvailable(Derived_SP_vector_FLOPS)) { std::vector<double> stat = ComputederivedStat(Derived_SP_vector_FLOPS); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6); } } stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6)<< " ]" << "\tderived_SP_vector_FLOPS (MFLOPS)" << std::endl; } if (IsDerivedStatAvailable(Derived_L1_DMR)) { std::vector<double> stat = ComputederivedStat(Derived_L1_DMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } stream << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L1_DMR (%)" << std::endl; } else { stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L1_DMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_L2_DMR)) { std::vector<double> stat = ComputederivedStat(Derived_L2_DMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } stream<< " [ " << std::setw(12) << "-" << " ]" << "\tderived_L2_DMR (%)" << std::endl; } else { stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L2_DMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_L1_TMR)) { std::vector<double> stat = ComputederivedStat(Derived_L1_TMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } stream << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L1_TMR (%)" << std::endl; } else { stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << 
"\tderived_L1_TMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_L2_TMR)) { std::vector<double> stat = ComputederivedStat(Derived_L2_TMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } stream<< " [ " << std::setw(12) << "-" << " ]" << "\tderived_L2_TMR (%)" << std::endl; } else { stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L2_TMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_L3_TMR)) { std::vector<double> stat = ComputederivedStat(Derived_L3_TMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } stream<< " [ " << std::setw(12) << "-" << " ]" << "\tderived_L3_TMR (%)" << std::endl; } else { stream << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L3_TMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_Mem_Bandwidth)) { std::vector<double> stat = ComputederivedStat(Derived_Mem_Bandwidth); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { stream << std::setw(10) << std::setprecision(3)<< VectorSum(stat) * 64 / VectorMean(times) / (1024*1024)<< "MB/s"; } stream<< " [ " << std::setw(10) << "-" << " ]" << "\tderived_Mem_Bandwidth [MB/s]" << std::endl; } else { stream << " [ " << std::setw(9) << std::setprecision(3)<< VectorSum(stat) * 64 / VectorMean(times) / (1024*1024)<< "MB/s"<< " ]" << "\tderived_Mem_Bandwidth [MB/s]" << std::endl; } } if (IsDerivedStatAvailable(Derived_BANDWIDTH_SS)) { std::vector<double> stat = ComputederivedStat(Derived_BANDWIDTH_SS); if (numThreads > 1) for (int tid = 0; tid < numThreads; tid++) stream << " - "; stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]" << "\tderived_BANDWIDTH_SS (MB/s)" << std::endl; } if (IsDerivedStatAvailable(Derived_BANDWIDTH_DS)) { std::vector<double> stat = ComputederivedStat(Derived_BANDWIDTH_DS); if (numThreads > 1) for (int tid = 0; tid < numThreads; tid++) stream << " - "; stream << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]" << "\tderived_BANDWIDTH_DS (MB/s)" << std::endl; } break; case FileFormatMatlab: stream << routineName << " = " << eventId << ";" << std::endl; for (int i = 0; i < GetNumCounters(); i++) { std::stringstream vname; vname << "event{" <<routineName<< "}.counter(" << i + 1 << ").count"; stream << "event{" <<routineName<< "}.counter(" << i + 1 << ").name = \'" << GetName(i) << "\';" << std::endl; writeVecMatlab(stream, vname.str(), GetIndividualValues(i)); } break; case FileFormatLaTeX: stream << "\\hline" << std::endl; stream << "\\multicolumn{2}{c}{" << routineName << "}" << std::endl; stream << "\\hline" << std::endl; stream << "counter & count" << "\\\\" << std::endl; stream << "\\hline" << std::endl; for (int i = 0; i < GetNumCounters(); i++) stream << "\\lst{" << GetName(i) << "}" << " & " << GetAggregaterdCounterValuesOverAllThreads(i) << "\\\\" << std::endl; break; } } } /** * Print to screan */ void PapiCounter::PrintScreen() { int numThreads = Papi::Instance()->GetNumThreads(); if (GetNumCounters() > 0) { if (Papi::Instance()->GetNumThreads() > 1) { for (int tid = 0; tid < Papi::Instance()->GetNumThreads(); tid++) { std::cout << " THREAD" << std::setw(2) << tid; } } std::cout << " [ TOTAL ]" << std::endl; for (int i = 0; i < GetNumCounters(); i++) { if (Papi::Instance()->GetNumThreads() > 1) { for 
(int tid = 0; tid < Papi::Instance()->GetNumThreads(); tid++) { std::cout << " " << std::setw(12) << GetValue(tid, i); } } std::cout << " [ " << std::setw(12) << GetAggregaterdCounterValuesOverAllThreads(i) << " ]" << "\t" << GetName(i) << std::endl; } if (IsDerivedStatAvailable(Derived_FLIPS)) { std::vector<double> stat = ComputederivedStat(Derived_FLIPS); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6); } } std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]" << "\tderived_FLIPS (MFLIPS)" << std::endl; } if (IsDerivedStatAvailable(Derived_FLOPS)) { std::vector<double> stat = ComputederivedStat(Derived_FLOPS); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6); } } std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]" << "\tderived_FLOPS (MFLOPS)" << std::endl; } if (IsDerivedStatAvailable(Derived_DP_vector_FLOPS)) { std::vector<double> stat = ComputederivedStat(Derived_DP_vector_FLOPS); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6); } } std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]" << "\tderived_DP_vector_FLOPS (MFLOPS)" << std::endl; } if (IsDerivedStatAvailable(Derived_SP_vector_FLOPS)) { std::vector<double> stat = ComputederivedStat(Derived_SP_vector_FLOPS); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << " " << std::setw(12) << stat[tid] / VectorMean(times) / (1.e6); } } std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]" << "\tderived_SP_vector_FLOPS (MFLOPS)" << std::endl; } if (IsDerivedStatAvailable(Derived_L1_DMR)) { std::vector<double> stat = ComputederivedStat(Derived_L1_DMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L1_DMR (%)" << std::endl; } else { std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L1_DMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_L2_DMR)) { std::vector<double> stat = ComputederivedStat(Derived_L2_DMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L2_DMR (%)" << std::endl; } else { std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L2_DMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_L1_TMR)) { std::vector<double> stat = ComputederivedStat(Derived_L1_TMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L1_TMR (%)" << std::endl; } else { std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L1_TMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_L2_TMR)) { std::vector<double> stat = ComputederivedStat(Derived_L2_TMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << 
std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L2_TMR (%)" << std::endl; } else { std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L2_TMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_L3_TMR)) { std::vector<double> stat = ComputederivedStat(Derived_L3_TMR); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << std::setw(12) << std::setprecision(3)<< stat[tid] * 100 << "%"; } std::cout << " [ " << std::setw(12) << "-" << " ]" << "\tderived_L3_TMR (%)" << std::endl; } else { std::cout << " [ " << std::setw(11) << std::setprecision(3)<< stat[0] * 100 << "%" << " ]" << "\tderived_L3_TMR (%)" << std::endl; } } if (IsDerivedStatAvailable(Derived_Mem_Bandwidth)) { std::vector<double> stat = ComputederivedStat(Derived_Mem_Bandwidth); if (numThreads > 1) { for (int tid = 0; tid < numThreads; tid++) { std::cout << std::setw(10) << std::setprecision(3)<< VectorSum(stat) * 64 / VectorMean(times) / (1024*1024)<< "MB/s"; } std::cout<< " [ " << std::setw(12) << "-" << " ]" << "\tderived_Mem_Bandwidth [MB/s]" << std::endl; } else { std::cout << " [ " << std::setw(9) << std::setprecision(3)<< VectorSum(stat) * 64 / VectorMean(times) / (1024*1024)<< "MB/s"<< " ]" << "\tderived_Mem_Bandwidth [MB/s]" << std::endl; } } if (IsDerivedStatAvailable(Derived_BANDWIDTH_SS)) { std::vector<double> stat = ComputederivedStat(Derived_BANDWIDTH_SS); if (numThreads > 1) for (int tid = 0; tid < numThreads; tid++) std::cout << " - "; std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]" << "\tderived_BANDWIDTH_SS (MB/s)" << std::endl; } if (IsDerivedStatAvailable(Derived_BANDWIDTH_DS)) { std::vector<double> stat = ComputederivedStat(Derived_BANDWIDTH_DS); if (numThreads > 1) for (int tid = 0; tid < numThreads; tid++) std::cout << " - "; std::cout << " [ " << std::setw(12) << VectorSum(stat) / VectorMean(times) / (1.e6) << " ]" << "\tderived_BANDWIDTH_DS (MB/s)" << std::endl; } } else { std::cout << "PAPI-WRAP :: no counters to print" << std::endl; } } /** * Are derived statistics available * @param statIdx * @return */ bool PapiCounter::IsDerivedStatAvailable(const DerivedStatistics statIdx) const { switch (statIdx) { case Derived_FLIPS: return findString(names, std::string("PAPI_FP_INS")) >= 0 ? true : false; case Derived_FLOPS: return findString(names, std::string("PAPI_FP_OPS")) >= 0 ? true : false; case Derived_DP_vector_FLOPS: return findString(names, std::string("PAPI_DP_OPS")) >= 0 ? true : false; case Derived_SP_vector_FLOPS: return findString(names, std::string("PAPI_SP_OPS")) >= 0 ? true : false; case Derived_L1_DMR: return ( findString(names, std::string("PAPI_L1_DCA")) >= 0 && findString(names, std::string("PAPI_L1_DCM")) >= 0 ); case Derived_L2_DMR: return ( findString(names, std::string("PAPI_L2_DCA")) >= 0 && findString(names, std::string("PAPI_L2_DCM")) >= 0 ); case Derived_L1_TMR: return ( findString(names, std::string("PAPI_L1_TCA")) >= 0 && findString(names, std::string("PAPI_L1_TCM")) >= 0 ); case Derived_L2_TMR: return ( findString(names, std::string("PAPI_L2_TCA")) >= 0 && findString(names, std::string("PAPI_L2_TCM")) >= 0 ); case Derived_L3_TMR: return ( findString(names, std::string("PAPI_L3_TCA")) >= 0 && findString(names, std::string("PAPI_L3_TCM")) >= 0 ); case Derived_Mem_Bandwidth: return findString(names, std::string("PAPI_L3_TCM")) >= 0 ? 
true : false; case Derived_BANDWIDTH_SS: return ( findString(names, std::string("SYSTEM_READ_RESPONSES:0x07")) >= 0 && findString(names, std::string("OCTWORD_WRITE_TRANSFERS:0x01")) >= 0 ); case Derived_BANDWIDTH_DS: return ( findString(names, std::string("SYSTEM_READ_RESPONSES:0x07")) >= 0 && findString(names, std::string("OCTWORD_WRITE_TRANSFERS:0x01")) >= 0 ); } return false; } /** * Compute derived statistics * @param statIdx * @return */ std::vector<double> PapiCounter::ComputederivedStat(DerivedStatistics statIdx) { std::vector<double> derived(GetNumThreads()); int idx, idxCM, idxCA; int idxSRS, idxOWT; switch (statIdx) { case Derived_FLIPS: idx = findString(names, std::string("PAPI_FP_INS")); for (int tid = 0; tid < GetNumThreads(); tid++) { // returns total number of FP instructions // FLIPS is obtained by dividing by the run time derived[tid] = GetValue(tid, idx); } return derived; case Derived_FLOPS: idx = findString(names, std::string("PAPI_FP_OPS")); for (int tid = 0; tid < GetNumThreads(); tid++) { // returns total number of FP operations // determine FLOPS by summing and dividing by time derived[tid] = GetValue(tid, idx); } return derived; case Derived_DP_vector_FLOPS: idx = findString(names, std::string("PAPI_DP_OPS")); for (int tid = 0; tid < GetNumThreads(); tid++) { // returns total number of FP operations // determine FLOPS by summing and dividing by time derived[tid] = GetValue(tid, idx); } return derived; case Derived_SP_vector_FLOPS: idx = findString(names, std::string("PAPI_SP_OPS")); for (int tid = 0; tid < GetNumThreads(); tid++) { // returns total number of FP operations // determine FLOPS by summing and dividing by time derived[tid] = GetValue(tid, idx); } return derived; case Derived_L1_DMR: idxCM = findString(names, std::string("PAPI_L1_DCM")); idxCA = findString(names, std::string("PAPI_L1_DCA")); for (int tid = 0; tid < GetNumThreads(); tid++) { derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA); } return derived; case Derived_L2_DMR: idxCM = findString(names, std::string("PAPI_L2_DCM")); idxCA = findString(names, std::string("PAPI_L2_DCA")); for (int tid = 0; tid < GetNumThreads(); tid++) { derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA); } return derived; case Derived_L1_TMR: idxCM = findString(names, std::string("PAPI_L1_TCM")); idxCA = findString(names, std::string("PAPI_L1_TCA")); for (int tid = 0; tid < GetNumThreads(); tid++) { derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA); } return derived; case Derived_L2_TMR: idxCM = findString(names, std::string("PAPI_L2_TCM")); idxCA = findString(names, std::string("PAPI_L2_TCA")); for (int tid = 0; tid < GetNumThreads(); tid++) { derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA); } return derived; case Derived_L3_TMR: idxCM = findString(names, std::string("PAPI_L3_TCM")); idxCA = findString(names, std::string("PAPI_L3_TCA")); for (int tid = 0; tid < GetNumThreads(); tid++) { derived[tid] = (double) GetValue(tid, idxCM) / (double) GetValue(tid, idxCA); } return derived; case Derived_Mem_Bandwidth: idx = findString(names, std::string("PAPI_L3_TCM")); for (int tid = 0; tid < GetNumThreads(); tid++) { // returns total number of L3 cache misses // bandwidth is derived by scaling with the cache-line size and dividing by time derived[tid] = GetValue(tid, idx); } return derived; // see page 2101 of Shirley Moore et al., Procedia Computer Science 4 (2011) // this needs some tweaking for Interlagos (15h): case Derived_BANDWIDTH_SS: idxSRS =
findString(names, std::string("SYSTEM_READ_RESPONSES:0x07")); idxOWT = findString(names, std::string("OCTWORD_WRITE_TRANSFERS:0x01")); for (int tid = 0; tid < GetNumThreads(); tid++) { derived[tid] = (double) GetValue(tid, idxSRS)*32. + (double) GetValue(tid, idxOWT)*8.; } return derived; case Derived_BANDWIDTH_DS: idxSRS = findString(names, std::string("SYSTEM_READ_RESPONSES:0x07")); idxOWT = findString(names, std::string("OCTWORD_WRITE_TRANSFERS:0x01")); // dual stream: accumulate bandwidth of one core per Bulldozer module for (int tid = 0; tid < GetNumThreads(); tid++) { derived[tid] = (double) GetValue(tid, idxSRS)*32. + (double) GetValue(tid, idxOWT)*4.; } return derived; } // this will never occur - just to keep the compiler warnings off return std::vector<double>(0); } /*================================================ PapiCounterList ================================================*/ /** * Add a routine to the PAPI counter list * @param routineName */ void PapiCounterList::AddRoutine(const std::string routineName) { // ensure that someone hasn't already added an event with this name assert(routineEvents.find(routineName) == routineEvents.end()); routineEvents[routineName] = PapiCounter(); } /** * Get the counter for a routine * @param routineName * @return the counter registered for the routine */ PapiCounter& PapiCounterList::Routine(std::string routineName) { // ensure that an event with this name exists assert(routineEvents.find(routineName) != routineEvents.end()); return routineEvents[routineName]; } /** * Write to file * @param fileName * @param fileFormat */ void PapiCounterList::WriteToFile(const std::string fileName, PapiFileFormat fileFormat) { std::ofstream fid; fid.open(fileName.c_str()); switch (fileFormat) { case FileFormatMatlab: break; case FileFormatPlain: break; case FileFormatLaTeX: fid << "\\begin{tabular}{lr}" << std::endl; break; } int id = 1; for (std::map<std::string, PapiCounter>::iterator it = routineEvents.begin(); it != routineEvents.end(); it++) { it->second.WriteToStream(it->first, id, fid, fileFormat); id++; } switch (fileFormat) { case FileFormatMatlab: break; case FileFormatPlain: break; case FileFormatLaTeX: fid << "\\hline" << std::endl; fid << "\\end{tabular}" << std::endl; break; } fid.close(); } /** * Write to stream * @param fstream * @param fileFormat */ void PapiCounterList::WriteToFile(std::ofstream &fstream, PapiFileFormat fileFormat) { switch (fileFormat) { case FileFormatMatlab: break; case FileFormatPlain: break; case FileFormatLaTeX: fstream << "\\begin{tabular}{lr}" << std::endl; break; } int id = 1; for (std::map<std::string, PapiCounter>::iterator it = routineEvents.begin(); it != routineEvents.end(); it++) { it->second.WriteToStream(it->first, id++, fstream, fileFormat); } switch (fileFormat) { case FileFormatMatlab: break; case FileFormatPlain: break; case FileFormatLaTeX: fstream << "\\hline" << std::endl; fstream << "\\end{tabular}" << std::endl; break; } // close the file stream fstream.close(); } /** * Print to screen */ void PapiCounterList::PrintScreen() { for (std::map<std::string, PapiCounter>::iterator it = routineEvents.begin(); it != routineEvents.end(); it++) { std::cout << "--------------------------------" << std::endl; std::cout << it->first << " :: wall time " << it->second.GetTime() << " s" << std::endl; std::cout << "--------------------------------" << std::endl; it->second.PrintScreen(); } } #endif
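A minimal usage sketch for the list interface above. The header name papi_wrap.h, the Start()/Stop() calls on the per-routine counter, and run_gemm_kernel() are assumptions: Start()/Stop() belong to the earlier, unshown part of this wrapper, and run_gemm_kernel() stands in for any workload.

#include "papi_wrap.h"   // assumed header name for this wrapper

void profile_gemm_example() {
    PapiCounterList papi_events;
    papi_events.AddRoutine("gemm");          // register a routine by name
    papi_events.Routine("gemm").Start();     // assumed: begin counting
    run_gemm_kernel();                       // hypothetical workload to measure
    papi_events.Routine("gemm").Stop();      // assumed: stop and accumulate
    papi_events.PrintScreen();               // per-thread columns plus [ TOTAL ]
}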
GB_unop__frexpe_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__frexpe_fp32_fp32 // op(A') function: GB_unop_tran__frexpe_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = GB_frexpef (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_frexpef (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = GB_frexpef (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FREXPE || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__frexpe_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = GB_frexpef (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__frexpe_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
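For context (the generated file above does not define it): GB_frexpef is assumed to return the binary exponent that frexpf reports through its second argument, i.e. the e in x = m * 2^e with 0.5 <= |m| < 1 for nonzero finite x. A minimal sketch under that assumption; the real definition lives elsewhere in the library (GB.h), not here.

#include <math.h>

// Sketch only -- not the library's actual GB_frexpef.
static inline float sketch_frexpef (float x)
{
    int e = 0 ;
    (void) frexpf (x, &e) ;    // mantissa discarded, exponent kept
    return ((float) e) ;
}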
HelloOpenMP_fix5.c
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[]){
    printf("Goodbye slow serial world and Hello OpenMP!\n");
    #pragma omp parallel
    if (omp_get_thread_num() == 0) {
        printf(" I have %d thread(s) and my thread id is %d\n",
               omp_get_num_threads(), omp_get_thread_num());
    }
}
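A hedged variant of the same exercise: the standard single construct gets the "print once from the team" effect without testing the thread id, though the printing thread is then not guaranteed to be thread 0.

#include <stdio.h>
#include <omp.h>

int main(void) {
    #pragma omp parallel
    #pragma omp single   // exactly one team member executes the next statement
    printf("team size: %d, reporting thread: %d\n",
           omp_get_num_threads(), omp_get_thread_num());
    return 0;
}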
GB_binop__bxor_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint32) // A*D function (colscale): GB (_AxD__bxor_uint32) // D*A function (rowscale): GB (_DxB__bxor_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint32) // C=scalar+B GB (_bind1st__bxor_uint32) // C=scalar+B' GB (_bind1st_tran__bxor_uint32) // C=A+scalar GB (_bind2nd__bxor_uint32) // C=A'+scalar GB (_bind2nd_tran__bxor_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_UINT32 || GxB_NO_BXOR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bxor_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
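These generated kernels are not called directly; they are reached through the generic GraphBLAS API. A hedged sketch of a set-union eWiseAdd on two GrB_UINT32 matrices, which would normally dispatch to GB (_AaddB__bxor_uint32) unless the operator is disabled:

#include "GraphBLAS.h"

// Assumes A and B are already-built GrB_UINT32 matrices of equal dimensions.
GrB_Info bxor_example (GrB_Matrix *C, GrB_Matrix A, GrB_Matrix B,
                       GrB_Index nrows, GrB_Index ncols)
{
    GrB_Info info = GrB_Matrix_new (C, GrB_UINT32, nrows, ncols) ;
    if (info != GrB_SUCCESS) return (info) ;
    // set union with cij = aij ^ bij where both entries are present
    return (GrB_Matrix_eWiseAdd_BinaryOp (*C, NULL, NULL, GrB_BXOR_UINT32,
        A, B, NULL)) ;
}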
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;

  if (action == SetAction)
  {
    eigen_internal_assert(v != 0);
    m_maxThreads = *v;
  }
  else if (action == GetAction)
  {
    eigen_internal_assert(v != 0);
    #ifdef EIGEN_HAS_OPENMP
    if (m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be called first when calling Eigen from multiple threads */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  Index volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0, rows, 0, cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1, size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) * static_cast<double>(depth);
  double kMinTaskSize = 50000; // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if ((!Condition) || (threads == 1) || (omp_get_num_threads()>1))
    return func(0, rows, 0, cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if (transpose)
    std::swap(rows, cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>, info, threads, 0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();

    // Note that the actual number of threads might be lower than the number of requested ones.
    Index actual_threads = omp_get_num_threads();

    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows / Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i + 1 == actual_threads) ? rows - r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i + 1 == actual_threads) ? cols - c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if (transpose)
      func(c0, actualBlockCols, 0, rows, info);
    else
      func(0, rows, c0, actualBlockCols, info);
  }
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
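A short usage sketch of the thread-control entry points declared above; the matrix sizes are arbitrary, and the product on the last line is the GEMM that parallelize_gemm() splits across the OpenMP team.

#include <Eigen/Dense>

int main() {
    Eigen::initParallel();        // call once before multi-threaded use
    Eigen::setNbThreads(4);       // cap the number of threads Eigen may use
    int n = Eigen::nbThreads();   // reads the cap back (or omp_get_max_threads())
    Eigen::MatrixXd A = Eigen::MatrixXd::Random(512, 512);
    Eigen::MatrixXd B = Eigen::MatrixXd::Random(512, 512);
    Eigen::MatrixXd C = A * B;    // parallelized when built with OpenMP
    (void) n; (void) C;
    return 0;
}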
GB_unaryop__minv_int16_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int16_int64 // op(A') function: GB_tran__minv_int16_int64 // C type: int16_t // A type: int64_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 16) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 16) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int16_int64 ( int16_t *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int16_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
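For context, MINV is the typecast integer reciprocal 1/x. A sketch of the semantics GB_IMINV_SIGNED is assumed to implement, including the assumed SuiteSparse convention that signed 1/0 maps to the type's maximum value rather than trapping; this is not the library's actual macro.

#include <stdint.h>

// Sketch only, under the stated divide-by-zero assumption.
static inline int16_t sketch_iminv16 (int16_t x)
{
    if (x == 0) return INT16_MAX ;   // assumed convention for 1/0
    return (int16_t) (1 / x) ;       // yields 1, -1, or 0 in integer arithmetic
}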
parallel_push_pop_stack.c
#include <stdio.h>
#include <omp.h>

#define MAXSIZE 8

int stack[MAXSIZE];
int top = -1;

int isEmpty() {
    return top == -1;
}

int isFull() {
    /* top is the index of the last pushed element, so the stack is full at
       MAXSIZE - 1; the original check against MAXSIZE allowed an
       out-of-bounds write to stack[MAXSIZE]. */
    return top == MAXSIZE - 1;
}

int pop() {
    if (!isEmpty()) {
        return stack[top--];
    }
    printf("Could not retrieve data, Stack is empty.\n");
    return -1; /* sentinel: every path must return a value */
}

void push(int data) {
    if (!isFull()) {
        stack[++top] = data;
    } else {
        printf("Could not insert data, Stack is full.\n");
    }
}

int main() {
    omp_set_dynamic(0);
    #pragma omp parallel num_threads(2)
    {
        /* id must be private to each thread; the shared id in the original
           was a data race between the producer and the consumer. */
        int id = omp_get_thread_num();
        if (id == 0) {
            int num;
            while (1) {
                #pragma omp critical
                {
                    if (!isFull()) {
                        printf("Enter a number to push\n");
                        scanf("%d", &num);
                        push(num);
                    } else {
                        printf("Stack is full");
                    }
                    fgetc(stdin);
                }
            }
        } else {
            while (1) {
                #pragma omp critical
                {
                    if (!isEmpty()) {
                        printf("Deleted item = %d\n", pop());
                    } else {
                        printf("Stack is empty");
                    }
                    fgetc(stdin);
                }
            }
        }
    }
    return 0;
}
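The same mutual exclusion can be expressed with OpenMP's explicit lock API instead of the unnamed critical sections above; a minimal sketch guarding the push()/pop() defined in that file:

#include <omp.h>

/* Prototypes of the stack routines defined in the file above. */
void push(int data);
int pop();

omp_lock_t stack_lock;   /* omp_init_lock(&stack_lock) must run once before use */

void locked_push(int data) {
    omp_set_lock(&stack_lock);   /* blocks until the lock is free */
    push(data);
    omp_unset_lock(&stack_lock);
}

int locked_pop() {
    omp_set_lock(&stack_lock);
    int data = pop();
    omp_unset_lock(&stack_lock);
    return data;
}
/* Call omp_destroy_lock(&stack_lock) after the threads have joined. */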
Searching.202007271719.gather_top_m.subsearch.profile.h
// // Created by Zhen Peng on 7/27/2020. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> //#include <boost/sort/sort.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> #include <cfloat> #include <algorithm> //#include <omp.h> #include "../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../include/utils.h" #include "../include/Candidate.h" #include "../include/parallelization.h" #include "../include/bitvector.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; uint64_t dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; // int num_real_threads_ = 1; // int num_threads_intra_query_ = 1; // int num_threads_inter_query_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, Candidate cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_size, const idi queue_capacity, const PANNS::Candidate &cand); static void add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_top, // The number of elements in queue, independent with queue_start const idi queue_size); // The maximum capacity of queue, independent with queue_start. static void insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue_base, const idi insert_index, const idi queue_start, const idi queue_size); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); static idi merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); static void merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. 
std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); idi merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L); // idi merge_all_queues_para_array( //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // std::vector<Candidate> &set_L, // const idi L); idi merge_all_queues_para_array( std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); idi merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); void merge_two_consecutive_queues_in_place( std::vector<Candidate> &two_queues, const idi base_1, // const idi &end_1, const idi base_2, const idi &length_2); void merge_in_set_L( std::vector<Candidate> &set_L, const idi set_L_length, const idi num_queues, const idi local_queue_length); distf selecting_top_L_seq( std::vector<Candidate> &set_L, const idi global_L, // const idi local_L, const idi num_queues, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes); void selecting_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts); void gather_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, idi &top_m_candidates_size, std::vector<idi> &bound_subs); // idi merge_all_queues_all_together_in_sequential( // std::vector<Candidate> &set_L, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L); // idi min_all_queues_at_heads( // const std::vector<Candidate> &set_L, // std::vector<idi> &queue_heads, // const std::vector<idi> &local_queues_ends, // const idi local_queue_length, // const idi L); public: // For Profiling // L3CacheMissRate cache_miss_kernel; uint64_t count_distance_computation_ = 0; uint64_t count_add_to_queue_ = 0; // uint64_t count_single_query_computation_ = 0; // distf dist_min_ = 0; // distf dist_max_ = 0; double time_merge_ = 0; double time_select_ = 0; // double time_select_L_ = 0.0; // double time_select_M_ = 0.0; double time_initialization_ = 0; double time_sequential_phase_ = 0; double time_parallel_phase_ = 0; double time_ending_ = 0.0; double time_assign_s_ = 0.0; double time_expand_ = 0.0; double time_pick_top_m_ = 0.0; double time_distance_computation_ = 0.0; double time_add_to_queue_ = 0.0; // double time_insert_ = 0; // double time_compare_minimum_ = 0; // double time_memmove_ = 0; // std::vector<double> time_memmove_list_; // L3CacheMissRate profile_miss_rate; // uint64_t number_local_elements_ = 0; // std::vector<idi> L_ids_; // std::vector<idi> M_ids_; ~Searching() { 
free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, const unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); // void search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids); // void search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< 
std::vector<idi> > &set_K_list); // void para_search_with_top_m_critical_area( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_no_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_yes_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); // void para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_in_array( // void para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); // void para_search_with_top_m_merge_queues_by_sort( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &dest_offsets, // const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L. 
// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v0( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v2( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_better_merge_v1( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, //// std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited); // void para_search_with_top_m_merge_queues_better_merge_v0_0( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_less_merge( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds); //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_no_merge( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited, // std::vector<distf> &local_thresholds, // const uint64_t computation_threshold); // void para_search_with_top_m_merge_queues_scale_m_v0( // const idi 
value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited); // std::vector<distf> &local_thresholds); // BitVector &is_visited) // void para_search_with_top_m_merge_queues_scale_m_v2( // const idi value_M_min, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_scale_m_v3( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_sequential_merge( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_nested_para( const idi 
batch_start, const idi batch_size, const idi value_M_middle, const idi value_M_max, const idi K, const idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue std::vector< std::vector<idi> > &top_m_candidates_list, std::vector< boost::dynamic_bitset<> > &is_visited_list); void subsearch_with_top_m( const idi value_M_max, const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &local_top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation); void subsearch_top_m_for_one_iteration( const idi iter, idi &k_uc, const idi value_M, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation); // void subsearch_top_m_for_one_iteration_lth( // const distf bound_lth, // const idi iter, // idi &k_uc, // const idi value_M, // const idi query_id, // const dataf *query_data, // const idi L, // std::vector<Candidate> &set_L, // const idi set_L_start, // idi &set_L_size, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &count_distance_computation); void subsearch_top_m_for_one_iteration_lth_mth( const distf bound_lth, // const idi top_m_position, const idi iter, idi &k_uc, const idi local_m_count, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation, double &time_pick_top_m, uint64_t &count_add_to_queue, double &time_distance_computation, double &time_add_to_queue); void para_search_with_top_m_subsearch_v3( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts, std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_subsearch_v4( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void subsearch_for_simple_search( const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi base_set_L, idi &set_L_end, // std::vector<uint8_t> &is_visited, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation); void para_simple_search_subsearch( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> 
            &is_visited);
    void load_true_NN(
            const char *filename,
            std::vector< std::vector<idi> > &true_nn_list);
    void get_recall_for_all_queries(
            const std::vector< std::vector<idi> > &true_nn_list,
            const std::vector<std::vector<unsigned>> &set_K_list,
            std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching

/**
 * Input the data from the file.
 * @param filename
 */
inline void Searching::load_data_load(char *filename)
{
    auto old_d = dimension_;
    DiskIO::load_data(
            filename,
            data_load_,
            num_v_,
            dimension_);
    if (old_d) {
        if (old_d != dimension_) {
            std::cerr << "Error: data dimension " << dimension_
                << " is not equal to query dimension " << old_d << "." << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}

/**
 * Input queries from the file.
 * @param filename
 */
inline void Searching::load_queries_load(char *filename)
{
    auto old_d = dimension_;
    DiskIO::load_data(
            filename,
            queries_load_,
            num_queries_,
            dimension_);
    if (old_d) {
        if (old_d != dimension_) {
            std::cerr << "Error: query dimension " << dimension_
                << " is not equal to data dimension " << old_d << "." << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}

/**
 * Input the NSG graph from the file.
 * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
 * @param filename
 */
inline void Searching::load_nsg_graph(char *filename)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        std::cerr << "Error: cannot open file " << filename << "." << std::endl;
        exit(EXIT_FAILURE);
    }
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));

    data_bytes_ = (1 + dimension_) * sizeof(dataf);
    neighbor_bytes_ = (1 + width_) * sizeof(idi);
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: not enough memory for opt_nsg_graph_."
<< std::endl; exit(EXIT_FAILURE); } idi v_id = 0; num_e_ = 0; char *base_location = opt_nsg_graph_; while (true) { idi degree; fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); if (fin.eof()) { break; } num_e_ += degree; // std::vector<idi> tmp_ngbrs(degree); // fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned)); // Norm and data distf norm = compute_norm(data_load_ + v_id * dimension_); // distf norm = compute_norm(v_id); std::memcpy(base_location, &norm, sizeof(distf)); // Norm memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data base_location += data_bytes_; // Neighbors memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors // memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned)); base_location += neighbor_bytes_; ++v_id; } if (v_id != num_v_) { std::cerr << "Error: NSG data has " << v_id << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; exit(EXIT_FAILURE); } free(data_load_); data_load_ = nullptr; // //////////////////////// // idi v_id = 0; // num_e_ = 0; // while (true) { // idi degree; // fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); // if (fin.eof()) { // break; // } // num_e_ += degree; // // std::vector<idi> ngbrs(degree); // fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned)); //// nsg_graph_.push_back(ngbrs); //// tmp_edge_list.push_back(ngbrs); // edge_list_.push_back(ngbrs); // ++v_id; // } // if (v_id != num_v_) { // std::cerr << "Error: NSG data has " << v_id // << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; // exit(EXIT_FAILURE); // } } /** * Load those true top-K neighbors (ground truth) of queries * @param filename * @param[out] true_nn_list */ inline void Searching::load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list) // unsigned &t_K) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi t_query_num; idi t_K; // unsigned t_K; fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num)); fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K)); // if (t_query_num != query_num) { // fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n", // query_num, t_query_num, filename); // exit(EXIT_FAILURE); // } if (t_query_num < num_queries_) { fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_); exit(EXIT_FAILURE); } if (t_K < 100) { fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); exit(EXIT_FAILURE); } // data = new unsigned[(size_t) t_query_num * (size_t) t_K]; true_nn_list.resize(t_query_num); for (idi q_i = 0; q_i < t_query_num; ++q_i) { true_nn_list[q_i].resize(t_K); } for (unsigned q_i = 0; q_i < t_query_num; ++q_i) { // size_t offset = q_i * t_K; for (unsigned n_i = 0; n_i < t_K; ++n_i) { unsigned id; float dist; fin.read(reinterpret_cast<char *>(&id), sizeof(id)); fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // data[offset + n_i] = id; true_nn_list[q_i][n_i] = id; } } fin.close(); } inline void Searching::get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const { // if (t_K < 100) { // fprintf(stderr, "Error: t_K 
%u is smaller than 100.\n", t_K); // exit(EXIT_FAILURE); // } if (true_nn_list[0].size() < 100) { fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n"); exit(EXIT_FAILURE); } recalls[1] = 0.0; recalls[5] = 0.0; recalls[10] = 0.0; recalls[20] = 0.0; recalls[50] = 0.0; recalls[100] = 0.0; for (unsigned q_i = 0; q_i < num_queries_; ++q_i) { // size_t offset = q_i * t_K; for (unsigned top_i = 0; top_i < 100; ++top_i) { unsigned true_id = true_nn_list[q_i][top_i]; for (unsigned n_i = 0; n_i < 100; ++n_i) { if (set_K_list[q_i][n_i] == true_id) { if (n_i < 1) recalls[1] += 1; if (n_i < 5) recalls[5] += 1; if (n_i < 10) recalls[10] += 1; if (n_i < 20) recalls[20] += 1; if (n_i < 50) recalls[50] += 1; if (n_i < 100) recalls[100] += 1; } } } } recalls[1] /= 1.0 * num_queries_; recalls[5] /= 5.0 * num_queries_; recalls[10] /= 10.0 * num_queries_; recalls[20] /= 20.0 * num_queries_; recalls[50] /= 50.0 * num_queries_; recalls[100] /= 100.0 * num_queries_; } inline void Searching::search_in_sequential( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { // {//test // printf("Iteration: Relative_Distance:\n"); //// printf("Iteration: Relative_Distance:\n"); //// printf("----query: %u----\n", query_id); // } boost::dynamic_bitset<> is_visited(num_v_); for (idi v_i = 0; v_i < L; ++v_i) { is_visited[init_ids[v_i]] = true; } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); idi k = 0; // Index of every queue's first unchecked candidate. idi tmp_count = 0; // for debug // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } while (k < L) { Candidate &top_cand = set_L[k]; unsigned nk = L; if (!top_cand.is_checked_) { ++tmp_count; top_cand.is_checked_ = true; idi v_id = top_cand.id_; // Vertex ID. 
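            // Expand the best unchecked candidate: prefetch its adjacency list and
            // each neighbor's vertex block, then compute neighbor-to-query distances.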
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } // Traverse v_id's all neighbors, pushing them into the queue for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // Compute the distance ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Insert into the queue idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } } if (nk <= k) { k = nk; } else { ++k; } } // cache_miss_kernel.measure_stop(); for (size_t k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } // {//test // if (0 == query_id) { // exit(1); // } // } } //inline void Searching::search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // BitVector is_visited(num_v_); // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { //// is_visited[init_ids[v_i]] = true; // is_visited.atomic_set_bit(init_ids[v_i]); // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } //// cache_miss_kernel.measure_stop(); //#pragma omp parallel for // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /** * Prepare init_ids and flags, as they are constant for all queries. * @param[out] init_ids * @param L */ inline void Searching::prepare_init_ids( std::vector<unsigned int> &init_ids, const unsigned L) const { // idi num_ngbrs = get_out_degree(ep_); // edgei edge_start = nsg_graph_indices_[ep_]; // // Store ep_'s neighbors as candidates // idi tmp_l = 0; // for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { // init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; // } // std::unordered_set<idi> visited_ids; boost::dynamic_bitset<> is_selected(num_v_); idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; idi init_ids_end = 0; // for (; tmp_l < L && tmp_l < out_degree; tmp_l++) { for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { // idi v_id = out_edges[tmp_l]; idi v_id = out_edges[e_i]; if(is_selected[v_id]) { continue; } is_selected[v_id] = true; // init_ids[tmp_l] = v_id; init_ids[init_ids_end++] = v_id; // init_ids[tmp_l] = out_edges[tmp_l]; // visited_ids.insert(init_ids[tmp_l]); } // for (idi i = 0; i < tmp_l; ++i) { // is_visited[init_ids[i]] = true; // } // If ep_'s neighbors are not enough, add other random vertices idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
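    // Deterministic fill: take IDs ep_ + 1, ep_ + 2, ... (wrapping at num_v_),
    // skipping already-selected vertices, so every query uses the same init set.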
while (init_ids_end < L) { tmp_id %= num_v_; idi v_id = tmp_id++; if (is_selected[v_id]) { continue; } // if (visited_ids.find(id) != visited_ids.end()) { // continue; // } is_selected[v_id] = true; // visited_ids.insert(id); init_ids[init_ids_end++] = v_id; // tmp_l++; } } // TODO: re-code in AVX-512 inline dataf Searching::compute_norm( const dataf *data) const // idi vertex_id) // const std::vector<PANNS::dataf> &data) // size_t loc_start, // idi dimension) { // const dataf *a = data.data() + loc_start; // const dataf *a = data_load_ + vertex_id * dimension_; // idi size = dimension_; dataf result = 0; //#define AVX_L2NORM(addr, dest, tmp) \ // tmp = _mm256_load_ps(addr); \ // tmp = _mm256_mul_ps(tmp, tmp); \ // dest = _mm256_add_ps(dest, tmp); #define AVX_L2NORM(addr, dest, tmp) \ tmp = _mm256_loadu_ps(addr); \ tmp = _mm256_mul_ps(tmp, tmp); \ dest = _mm256_add_ps(dest, tmp); __m256 sum; __m256 l0, l1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = data; const float *e_l = l + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 16, l += 16) { AVX_L2NORM(l, sum, l0); AVX_L2NORM(l + 8, sum, l1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; return result; } inline dataf Searching::compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<PANNS::dataf> &d_data, // const std::vector<PANNS::dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const // idi dimension) { // idi size = dimension_; float result = 0; //#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ // tmp1 = _mm256_load_ps(addr1);\ // tmp2 = _mm256_load_ps(addr2);\ // tmp1 = _mm256_mul_ps(tmp1, tmp2); \ // dest = _mm256_add_ps(dest, tmp1); #define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm256_loadu_ps(addr1);\ tmp2 = _mm256_loadu_ps(addr2);\ tmp1 = _mm256_mul_ps(tmp1, tmp2); \ dest = _mm256_add_ps(dest, tmp1); __m256 sum; __m256 l0, l1; __m256 r0, r1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = v_data; const float *r = q_data; // const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf)); // const float *r = queries_load_ + query_id * dimension_; const float *e_l = l + DD; const float *e_r = r + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { AVX_DOT(l, r, sum, l0, r0); AVX_DOT(l + 8, r + 8, sum, l1, r1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; result = -2 * result + vertex_norm; return result; } //// DEPRECATED. // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. 
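// Hedged usage sketch (illustrative only; `searcher`, `capacity`, and `incoming`
// are hypothetical names, not part of this header): maintaining a sorted,
// fixed-capacity candidate queue with the first add_into_queue overload.
//
//   std::vector<Candidate> queue(capacity);
//   idi queue_top = 0; // Current number of elements in the queue.
//   for (const Candidate &cand : incoming) {
//       idi loc = searcher.add_into_queue(queue, queue_top, capacity, cand);
//       // loc < capacity: cand inserted at position loc (tail dropped if full).
//       // loc == capacity: queue is full and cand sorts after the tail; discarded.
//   }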
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    if (0 == queue_top) {
        queue[queue_top++] = cand;
        return 0;
    }
    // Find the insert location
    auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
    idi insert_loc = it_loc - queue.begin();
    if (insert_loc == queue_size) {
        return queue_size;
    }
    // Insert
    if (queue_top == queue_size) {
        // If full already
        --queue_top;
    }
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_top - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_top;
    return insert_loc;
}

// add_into_queue with a queue_start: the queue occupies
// [queue_start, queue_start + queue_size) inside a larger array.
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        const idi queue_start,
        idi &queue_size, // The number of elements in the queue, relative to queue_start
        const idi queue_capacity, // The maximum capacity of the queue, independent of queue_start
        const PANNS::Candidate &cand)
{
    if (0 == queue_size) {
        queue[queue_start + queue_size++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_size;
    // Find the insert location
    const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand);
    idi insert_loc = it_loc - queue.begin();
    if (insert_loc != queue_end) {
        if (cand.id_ == it_loc->id_) {
            // Duplicate
            return queue_capacity;
        }
        if (queue_size >= queue_capacity) { // Queue is full
            --queue_size;
            --queue_end;
        }
    } else { // insert_loc == queue_end: insert at the end
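        // The candidate sorts after every current element: append only if the
        // queue still has room; otherwise it cannot make the cut.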
if (queue_size < queue_capacity) { // Queue is not full // Insert at the end queue[insert_loc] = cand; ++queue_size; return queue_size - 1; } else { // Queue is full return queue_capacity; } } // Add into queue memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_end - insert_loc) * sizeof(Candidate)); queue[insert_loc] = cand; ++queue_size; return insert_loc - queue_start; } inline void Searching::add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_size, // The number of elements in queue, independent with queue_start const idi queue_length) // The maximum capacity of queue, independent with queue_start. { const idi dest_index = queue_start + insert_index; if (queue_size == queue_length) { --queue_size; } memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index) * sizeof(Candidate)); queue[dest_index] = cand; ++queue_size; } inline void Searching::insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, const idi queue_start, const idi queue_size) { const idi dest_index = queue_start + insert_index; memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index - 1) * sizeof(Candidate)); queue[dest_index] = cand; // memmove(reinterpret_cast<char *>(queue_base + dest_index + 1), // reinterpret_cast<char *>(queue_base + dest_index), // (queue_size - insert_index - 1) * sizeof(T)); // for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) { // queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start); // } // queue_base[dest_index] = cand; } /** * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move. 
 * @param[out] c_queue
 * @param c_queue_top
 * @param cand
 * @return
 */
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        PANNS::Candidate cand)
{
    if (c_queue[0].distance_ > cand.distance_) { // Goes before the first element
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) { // Ties with the last element
        if (c_queue[c_queue_top - 1].id_ > cand.id_) {
            // Use ID as the secondary metric for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            return c_queue_top;
        }
    }
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }
    // On distance ties, move left so IDs stay ordered (ID is the secondary metric).
    while (0 != left
            && c_queue[left - 1].distance_ == cand.distance_
            && c_queue[left - 1].id_ > cand.id_) {
        --left;
    }
    // Insert at left
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}

/* Function:
 * queue1_size is fixed.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }

    // Insert the 1st element of queue2
    if (queue2[queue2_start].id_ != it_loc->id_) { // Not a duplicate
        insert_one_element_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size);
    }
    if (queue2_size == 1) {
        return insert_index;
    }

    // Merge the rest
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // queue1 or queue2 is exhausted; the rest of queue1 is already in place.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}

/* Function:
 * queue1_size should be updated.
 * queue1_length should be provided.
 */
inline void Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of elements in queue1, independent of queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent of queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
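    // Everything in queue1 that sorts before queue2's head is already in its
    // final position; merging only needs to start at that insertion point.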
auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { idi copy_count = (queue1_size + queue2_size > queue1_length) ? queue1_length - queue1_size : queue2_size; memmove(queue1.data() + queue1_start + queue1_size, queue2.data() + queue2_start, copy_count * sizeof(Candidate)); queue1_size += copy_count; return; } if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate add_into_queue_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size, queue1_length); } if (queue2_size == 1) { return; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound. const idi q_i_2_bound = queue2_start + queue2_size; // idi insert_i; for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) { if (q_i_1 >= q_i_1_bound) { queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2); for ( ; insert_i < queue1_size; ++insert_i) { queue1[queue1_start + insert_i] = queue2[q_i_2++]; } break; } else if (q_i_2 >= q_i_2_bound) { break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { add_into_queue_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size, queue1_length); ++q_i_1; q_i_1_bound = queue1_start + queue1_size; } else { // Duplicate ++q_i_2; ++q_i_1; } } } inline idi Searching::merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L) { int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; // {// Print queue a // printf("d: %u " // "i: %u " // "ai: %u " // "local_queues_ends[%d]: %d\n", // d, // i, // ai, // ai, // local_queues_ends[ai]); // for (idi i_q = 0; i_q < local_queues_ends[ai]; ++i_q) { // printf("[%u]: " // "id: %u " // "dist: %f\n", // i_q, // local_queues_list[ai][i_q].id_, // local_queues_list[ai][i_q].distance_); // } // } } } // Remain, prefix-sum-like merge if (size != num_threads_) { for (int 
i = size; i < num_threads_; ++i) { idi ai = i; idi bi = i - 1; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; } } // Merge into set_L idi r = L; if (local_queues_ends[num_threads_ - 1]) { r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, local_queues_list[num_threads_ - 1], 0, local_queues_ends[num_threads_ - 1]); } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return r; } /* Function: * Use large local_queues_array as a concatenation of all queues */ inline idi Searching::merge_all_queues_para_array( std::vector<Candidate> &set_L, std::vector<idi> &local_queues_ends, const idi local_queue_length, const idi L) { const int num_queues = num_threads_; idi nk = L; int size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != num_queues) { for (int i = size; i < num_queues; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, 
local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends // Not do this for Collector Idea or Selecting Idea std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } /* Function: * When merge all queues (in an array, and [num_threads_ - 1] is the global queue), * the starting local is at [queue_base] */ inline idi Searching::merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L) { idi nk = L; int size = 1 << (static_cast<idi>(log2(real_threads))); // int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); idi i_bound = size + queue_base; #pragma omp parallel for num_threads(real_threads) for (idi i = queue_base; i < i_bound; i += by) { // for (int i = 0; i < size; i += by) { // idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1 idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; // idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { // local_queues_list[ai].swap(local_queues_list[bi]); std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != real_threads) { // if (size != num_threads_) { for (int i = size + queue_base; i < num_threads_; ++i) { // for (int i = size; i < num_threads_; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } inline void Searching::merge_two_consecutive_queues_in_place( std::vector<Candidate> &two_queues, const idi base_1, // const idi &end_1, const idi base_2, const idi &length_2) { // idi tid = omp_get_thread_num(); idi index_1 = base_1; idi index_2 = 
        base_2;
    const idi bound_2 = base_2 + length_2;
    while (index_1 < index_2 && index_2 < bound_2) {
        Candidate e_1 = two_queues[index_1];
        Candidate e_2 = two_queues[index_2];
        if (e_1 < e_2) {
            ++index_1;
        } else if (e_2 < e_1) {
            std::memmove(two_queues.data() + index_1 + 1,
                    two_queues.data() + index_1,
                    (index_2 - index_1) * sizeof(Candidate));
            two_queues[index_1] = e_2;
            ++index_1;
            ++index_2;
        } else {
            // Duplicate; no dedup handling here yet, so e_1 ends up duplicated.
            std::memmove(two_queues.data() + index_1 + 1,
                    two_queues.data() + index_1,
                    (index_2 - index_1) * sizeof(Candidate));
            index_1 += 2;
            ++index_2;
        }
    }
}

inline void Searching::merge_in_set_L(
        std::vector<Candidate> &set_L,
        const idi
set_L_length, const idi num_queues, const idi local_queue_length) { idi size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { const idi merge_length = (local_queue_length << d); idi by = 1 << (d + 1); // Parallel for #pragma omp parallel for for (idi i = 0; i < size; i += by) { // idi a = i + (1 << d) - 1; // idi b = i + (1 << (d + 1)) - 1; idi a = i; idi b = i + (1 << d); idi base_a = a * local_queue_length; idi base_b = b * local_queue_length; if (base_a >= set_L_length || base_b >= set_L_length) { continue; } idi length_b; if (a + by < size) { length_b = merge_length; } else { // The last one if (size == num_queues) { length_b = set_L_length - base_b; } else { length_b = merge_length; } } // printf("a: %u b: %u " // "base_a: %u base_b: %u length_b: %u\n", // a, b, // base_a, base_b, length_b); merge_two_consecutive_queues_in_place( set_L, base_a, base_b, length_b); } } if (size != num_queues) { for (idi i = size; i < num_queues; ++i) { idi a = 0; idi b = i; idi base_a = a; idi base_b = b * local_queue_length; if (base_b >= set_L_length) { continue; } idi length_b; if (b != num_queues - 1) { length_b = local_queue_length; } else { length_b = set_L_length - base_b; } // printf("a: %u b: %u " // "base_a: %u base_b: %u length_b: %u\n", // a, b, // base_a, base_b, length_b); merge_two_consecutive_queues_in_place( set_L, base_a, base_b, length_b); } } } /* * 7/5/2020-20:27 * Every queue keeps only elements which can be ordered in the top-L globally. * local_queues_lengths records the end location for all queues */ inline distf Searching::selecting_top_L_seq( std::vector<Candidate> &set_L, const idi global_L, const idi num_queues, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes) { std::vector<idi> pointers(num_queues, 0); distf bound_lth; idi rank = 0; bool is_finished = false; distf min_dist = FLT_MAX; idi min_q_i; idi min_id; while (rank < global_L) { is_finished = true; min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { if (pointers[q_i] >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; idi sub = pointers[q_i] + local_queues_starts[q_i]; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (is_finished) { {//test printf("Error: selecting_top_L_seq: only found %u elements but global_L is %u.\n", rank, global_L); } break; } bound_lth = min_dist; ++pointers[min_q_i]; ++rank; } std::copy(pointers.begin(), pointers.end(), local_queues_sizes.begin()); return bound_lth; } /* * 7/24/2020-10:08 * Record for every queue the position that contains the top-M unchecked vertices. * So the total expanded vertices should still be M, which means the computation should * be the same with merging idea. 
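 * In other words, selecting the top-M unchecked vertices across the local
 * queues expands exactly M vertices per iteration, matching what a full
 * merge-then-pick-top-M would expand.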
*/ inline void Searching::selecting_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts) { std::vector<idi> pointers(pointers_starts); // std::vector<idi> pointers(num_queues, 0); std::fill(local_m_counts.begin(), local_m_counts.end(), 0); idi rank = 0; bool is_finished = true; distf min_dist = FLT_MAX; idi min_q_i = 0; idi min_id = 0; // Initialized defensively for the tie-break comparison below. while (rank < value_M) { min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { idi &pointer = pointers[q_i]; idi sub = pointer + local_queues_starts[q_i]; // {//test // if (133 == query_id && // 3 == iter && // 321341 == set_L[sub].id_) { // printf("(%u %f)\n", // set_L[sub].id_, set_L[sub].distance_); // } // } while (pointer < local_queues_sizes[q_i] && set_L[sub].is_checked_) { ++pointer; ++sub; } if (pointer >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (!is_finished) { is_finished = true; ++pointers[min_q_i]; ++rank; ++local_m_counts[min_q_i]; } else { break; } } // std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin()); } /* * 7/27/2020-15:41 * Gather the top-M unchecked vertices from local queues. */ inline void Searching::gather_unchecked_top_M_seq( const idi query_id, const idi iter, std::vector<Candidate> &set_L, const std::vector<idi> &pointers_starts, const idi value_M, const idi num_queues, const std::vector<idi> &local_queues_starts, const std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, idi &top_m_candidates_size, std::vector<idi> &bound_subs) { std::vector<idi> pointers(pointers_starts); // std::vector<idi> pointers(num_queues, 0); // std::fill(local_m_counts.begin(), local_m_counts.end(), 0); // idi rank = 0; bool is_finished = true; distf min_dist = FLT_MAX; idi min_q_i = 0; idi min_id = 0; // Initialized defensively for the tie-break comparison below. while (top_m_candidates_size < value_M) { min_dist = FLT_MAX; for (idi q_i = 0; q_i < num_queues; ++q_i) { idi &pointer = pointers[q_i]; idi sub = pointer + local_queues_starts[q_i]; while (pointer < local_queues_sizes[q_i] && set_L[sub].is_checked_) { ++pointer; ++sub; } if (pointer >= local_queues_sizes[q_i]) { // q_i is finished continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; } } if (!is_finished) { is_finished = true; idi sub = local_queues_starts[min_q_i] + pointers[min_q_i]; top_m_candidates[top_m_candidates_size++] = set_L[sub].id_; set_L[sub].is_checked_ = true; // Checked ++pointers[min_q_i]; // ++rank; // ++local_m_counts[min_q_i]; } else { break; } } // std::copy(pointers.begin(), pointers.end(), local_top_m_positions.begin()); std::copy(pointers.begin(), pointers.end(), bound_subs.begin()); } inline void Searching::search_with_top_m( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L;
++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug while (k < L) { ++tmp_count; unsigned nk = L; // Select M candidates idi last_k = L; for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } } inline void Searching::search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { // boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); // std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
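// Loop protocol for the while-loop below (shared with search_with_top_m above):
// k is the position of the first unchecked candidate in set_L; last_k records
// the position the last candidate was taken from in the current iteration; nk
// records the front-most position at which a new neighbor was inserted. If an
// insertion landed at or before last_k, the next scan restarts from nk;
// otherwise it resumes at last_k + 1. In this scaled variant, M starts at 1 and
// doubles at the end of each iteration while it is still below value_M_max.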
idi tmp_count = 0; // for debug idi M = 1; while (k < L) { ++tmp_count; unsigned nk = L; // Select M candidates idi last_k = L; for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } if (M < value_M_max) { M <<= 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset is_visited.reset(); } } ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids) //// std::vector<idi> &set_K) //{ // dist_max_ = -FLT_MAX; // dist_min_ = FLT_MAX; // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. 
// set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// For histogram // for (idi i_l = 0; i_l < L; ++i_l) { // distf dist = set_L[i_l].distance_; // {// For distance range // if (dist > dist_max_) { // dist_max_ = dist; // } // if (dist < dist_min_) { // dist_min_ = dist; // } // } // } // } // } // //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i].id_; //// } //} // ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } // const idi loc_range = L / 3; // // // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // //// {// For histogram //// const distf dist_range = dist_max_ - dist_min_; //// printf("iter:%u\n", 0); //// for (idi i_l = 0; i_l < L; ++i_l) { //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); //// } //// } // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // std::vector<idi> range_count(3, 0); // idi zero_inserted_count = 0; //// {//test //// printf("tmp_count: %u\n", tmp_count); //// } // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } //// {//test //// printf("top_m_candidates_ends: %u\n", top_m_candidates_end); //// } // { // if (0 == top_m_candidates_end) { // break; // } // } // // // uint64_t count_neighbors = 0; // uint64_t count_inserted = 0; // std::vector<idi> locs_to_count(M); // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // count_neighbors += out_degree; // idi num_inserted = 0; // // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // ++num_inserted; // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); //// { //// printf("c_i: %u " //// "count: %u " //// "loc_inserted: %u\n", //// c_i, //// num_inserted, //// r); //// } // if (r < nk) { // nk = r; // } // { // ++range_count[r / loc_range]; // } // } // { // if (0 == num_inserted) { // ++zero_inserted_count; // } // locs_to_count[c_i] = num_inserted; // count_inserted += num_inserted; // } //// { //// printf("c_i: %u " //// "num_inserted: %u\n", //// c_i, //// num_inserted); //// } // } //// { //// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) { //// locs_to_count[c_i] = 0; //// } //// printf("iter:%u\n", tmp_count); //// for (idi c_i = 0; c_i < M; ++c_i) { //// printf("%u %u\n", c_i, locs_to_count[c_i]); //// } //// } //// {//test //// idi sum = 0; //// for (const idi ct : range_count) sum += ct; //// printf("tmp_count: %u " //// "k: %u " //// "actual_M: %u %.1f%% " //// "zero_ins: %u %.1f%% " //// "1/3: %u %.1f%% " //// "2/3: %u %.1f%% " //// "3/3: %u %.1f%%\n", //// tmp_count, //// k, //// top_m_candidates_end, 100.0 * top_m_candidates_end / M, //// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end, //// range_count[0], 100.0 * range_count[0] / sum, //// range_count[1], 100.0 * range_count[1] / sum, //// range_count[2], 100.0 * range_count[2] / sum); //// } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // { // printf("query:%uiter: %u " // "#neighbors: %lu " // "#inserted: %lu " // "ratio: %.2f%%\n", // query_id, tmp_count, // count_neighbors, // count_inserted, // 100.0 * count_inserted / 
count_neighbors); // } //// {// For histogram ////// const auto it_min = std::min_element(set_L.begin(), set_L.end()); ////// const auto it_max = std::max_element(set_L.begin(), set_L.end()); ////// const distf dist_min = it_min->distance_; ////// const distf dist_max = it_max->distance_; ////// const distf dist_min = it_min->distance_ - 1.0; ////// const distf dist_max = it_max->distance_ + 1.0; //// const distf dist_range = dist_max_ - dist_min_; ////// const distf dist_range = dist_max - dist_min; ////// { ////// printf("it_min->distance_: %f dist_min: %f\n", ////// it_min->distance_, dist_min); ////// } ////// const distf dist_range = it_max->distance_ - it_min->distance_; //// printf("iter:%u\n", tmp_count); //// for (idi i_l = 0; i_l < L; ++i_l) { ////// printf("%f\n", set_L[i_l].distance_); ////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0); //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); ////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0); //// } //// } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // if (query_id == 3) { // exit(1); // } //} // //// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array //// boost::dynamic_bitset<> is_visited(num_v_); // Bit array // BitVector is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = true; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} ///// Backup //inline void Searching::search_with_top_m( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // ////// DEPRECATED: the is_visited array cannot be shared among threads. //inline void Searching::search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} inline void Searching::search_with_top_m_in_batch( const PANNS::idi M, const PANNS::idi batch_start, const PANNS::idi batch_size, const PANNS::idi K, const PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list) { std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_)); // Prepare the init_ids { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { auto &is_visited = is_visited_list[q_i]; for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } } // Initialize set_L_list { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_; for (idi i = 0; i < L; i++) { idi v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L); } } { std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates idi joint_queue_end = 0; boost::dynamic_bitset<> is_in_joint_queue(num_v_); // std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id]. // std::vector<idi> cands_query_ids_ends(num_v_, 0); std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M); std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate. 
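// nks and last_ks below mirror nk and last_k from the single-query search,
// tracked per query in the batch.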
std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked std::vector<idi> queries_not_finished(batch_size); idi queries_not_finished_end = batch_size; for (idi q_i = 0; q_i < batch_size; ++q_i) { queries_not_finished[q_i] = q_i; } bool is_finished = false; idi counter_for_debug = 0; while (!is_finished) { ++counter_for_debug; // Build the new joint queue // Traverse every query's queue for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) { idi q_local_id = queries_not_finished[q_i]; // last_ks[q_local_id] = L; auto &set_L = set_L_list[q_local_id]; idi top_m_count = 0; for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } set_L[c_i].is_checked_ = true; last_ks[q_local_id] = c_i; ++top_m_count; idi cand_id = set_L[c_i].id_; // Record which query selected cand_id auto tmp_c = cands_query_ids.find(cand_id); if (tmp_c != cands_query_ids.end()) { tmp_c->second.push_back(q_local_id); } else { cands_query_ids.emplace(cand_id, std::vector<idi>()); cands_query_ids[cand_id].reserve(batch_size); cands_query_ids[cand_id].push_back(q_local_id); } // cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id; // Add candidate cand_id into the joint queue if (is_in_joint_queue[cand_id]) { continue; } is_in_joint_queue[cand_id] = true; joint_queue[joint_queue_end++] = cand_id; } } queries_not_finished_end = 0; // Clear queries_not_finished // Traverse every shared candidate for (idi c_i = 0; c_i < joint_queue_end; ++c_i) { idi cand_id = joint_queue[c_i]; is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; const auto &query_local_ids = cands_query_ids[cand_id]; // Push neighbors to every queue of the queries that selected cand_id. 
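// Note: each shared candidate's adjacency list is read once per iteration,
// while the distance computations below are repeated for every query that
// selected it, so the batch shares graph reads but not distance work.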
// Traverse cand_id's neighbors // idi &q_i_bound = cands_query_ids_ends[cand_id]; // for (idi q_i = 0; q_i < q_i_bound; ++q_i) { // idi q_local_id = query_local_ids[q_i]; for (idi q_local_id : query_local_ids) { dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_; auto &is_visited = is_visited_list[q_local_id]; auto &set_L = set_L_list[q_local_id]; // // Traverse cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate new_cand(nb_id, dist, false); idi insert_loc = insert_into_queue(set_L, L, new_cand); if (insert_loc < nks[q_local_id]) { nks[q_local_id] = insert_loc; } } } cands_query_ids.erase(cand_id); // q_i_bound = 0; // Clear cands_query_ids[cand_id] } joint_queue_end = 0; // Clear joint_queue for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) { if (nks[q_local_id] <= last_ks[q_local_id]) { ks[q_local_id] = nks[q_local_id]; } else { ks[q_local_id] = last_ks[q_local_id] + 1; } nks[q_local_id] = L; last_ks[q_local_id] = L; if (ks[q_local_id] < L) { queries_not_finished[queries_not_finished_end++] = q_local_id; } } if (!queries_not_finished_end) { is_finished = true; } } } { for (idi q_i = 0; q_i < batch_size; ++q_i) { for (idi c_i = 0; c_i < K && c_i < L; ++c_i) { set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_; } } } //// // {//test // for (idi q_i = 0; q_i < batch_size; ++q_i) { // printf("query: %u\n", q_i + batch_start); // for (idi c_i = 0; c_i < K; ++c_i) { // printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_); // } // } // } } //inline void Searching::para_search_with_top_m_critical_area( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_no_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_yes_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// uint64_t count_visited = 0; // //// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// ++count_visited; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
// } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } //// ++count_visited; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // //// { //// printf("query_id: %u " //// "count_visited: %lu %f%%\n", //// query_id, //// count_visited, //// 100.0 * count_visited / num_v_); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } //// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); 
// } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. 
/*
 * 5/7/2020-15:14
 * Use one thread to scale M until value_M_middle,
 * then use multiple threads.
 */
inline void Searching::para_search_with_top_m_merge_queues_middle_m(
        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
    time_initialization_ -= WallTimer::get_time_mark();
//#pragma omp parallel for
    for (idi c_i = 0; c_i < L; ++c_i) {
        is_visited[init_ids[c_i]] = 1;
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    tmp_count_computation = 0;
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + L);
    local_queues_ends[num_threads_ - 1] = L;
    time_initialization_ += WallTimer::get_time_mark();

    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
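    // Layout note (a summary of the parameters above, not new behavior):
    // each non-master thread t (t = 1 .. num_threads_ - 1) owns a local queue
    // starting at offset (t - 1) * local_queue_length in set_L, while the
    // master's "global" queue of capacity L lives at offset base_set_L;
    // local_queues_ends[i] tracks the current size of queue i.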
idi tmp_count = 0; // for debug idi M = 1; time_sequential_phase_ -= WallTimer::get_time_mark(); { // Single thread while (k < L && M < value_M_middle) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); uint64_t tmp_count_add_to_queue = 0; double tmp_time_pick_top_m = 0; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0.0; { // Multiple Threads while (k < L) { time_expand_ -= WallTimer::get_time_mark(); ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; time_pick_top_m_ -= WallTimer::get_time_mark(); // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } time_pick_top_m_ += WallTimer::get_time_mark(); idi nk = L; // Push M candidates' neighbors into the queue. 
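            // The expansion below is parallelized over the selected top-M
            // candidates: thread 0 inserts surviving neighbors directly into
            // the global queue, every other thread fills its own local queue,
            // so no locking is needed until the merge step that follows.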
#pragma omp parallel for reduction(+ : tmp_count_computation) \ reduction(+ : tmp_count_add_to_queue) \ reduction(+ : tmp_time_pick_top_m) \ reduction(+ : tmp_time_distance_computation) \ reduction(+ : tmp_time_add_to_queue) // for (int tid = 0; tid < num_threads_; ++tid) { for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { tmp_time_pick_top_m -= WallTimer::get_time_mark(); int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } tmp_time_pick_top_m += WallTimer::get_time_mark(); for (idi e_i = 0; e_i < out_degree; ++e_i) { tmp_time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { tmp_time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); tmp_time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } ++tmp_count_add_to_queue; Candidate cand(nb_id, dist, false); // Add to the local queue. // tmp_time_pick_top_m -= WallTimer::get_time_mark(); tmp_time_add_to_queue -= WallTimer::get_time_mark(); if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } tmp_time_add_to_queue += WallTimer::get_time_mark(); // tmp_time_pick_top_m += WallTimer::get_time_mark(); } } time_add_to_queue_ += tmp_time_add_to_queue; tmp_time_add_to_queue = 0; // } time_distance_computation_ += tmp_time_distance_computation; tmp_time_distance_computation = 0; time_pick_top_m_ += tmp_time_pick_top_m; tmp_time_pick_top_m = 0; top_m_candidates_end = 0; // Clear top_m_candidates count_add_to_queue_ += tmp_count_add_to_queue; tmp_count_add_to_queue = 0; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; time_expand_ += WallTimer::get_time_mark(); // // Merge. Merge all queues in parallel. 
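            // Merge all thread-local queues into the global queue. The smallest
            // global-queue position that received a new candidate comes back as
            // r; via nk it decides where the next iteration resumes scanning.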
{ time_merge_ -= WallTimer::get_time_mark(); if (num_threads_ > 1) { idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } time_merge_ += WallTimer::get_time_mark(); } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } time_parallel_phase_ += WallTimer::get_time_mark(); time_ending_ -= WallTimer::get_time_mark(); #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } time_ending_ += WallTimer::get_time_mark(); // {//test // if (3 == query_id) { // exit(1); // } // } } inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { uint64_t count_single_query_computation = 0; uint64_t count_init_computation = 0; uint64_t count_seq_computation = 0; uint64_t count_par_computation = 0; // {//test // printf("query_id: %u\n", query_id); // } // time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; { #pragma omp parallel for for (idi c_i = 0; c_i < init_size; ++c_i) { // for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < init_size; ++v_i) { // for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < init_size; i++) { // for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
} count_distance_computation_ += tmp_count_computation; count_init_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + init_size); // set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = init_size; // local_queues_ends[num_threads_ - 1] = L; // time_initialization_ += WallTimer::get_time_mark(); // time_sequential_phase_ -= WallTimer::get_time_mark(); // std::vector<idi> top_m_candidates(M); idi &global_queue_size = local_queues_ends[num_threads_ - 1]; idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug idi M = 1; { // Single thread while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_seq_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } // time_sequential_phase_ += WallTimer::get_time_mark(); // time_parallel_phase_ -= WallTimer::get_time_mark(); { // Multiple Threads 
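        // Parallel phase: the same top-M expansion as above, but each round also
        // checks count_single_query_computation against computation_threshold,
        // so the query stops early once its distance-computation budget is spent.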
while (k < L and count_single_query_computation <= computation_threshold) { // while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d " // "k: %u " // "global_queue_size: %u\n", // tmp_count, // k, // global_queue_size); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. //#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_par_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } // Merge. Merge all queues in parallel. 
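            // With a single thread everything was already inserted into the
            // global queue, so merging is only needed when num_threads_ > 1.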
            {
                if (num_threads_ > 1) {
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }

            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_parallel_phase_ += WallTimer::get_time_mark();

#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
    }

    {// Reset
        is_visited.reset();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
}

/*
 * 6/22/2020-21:30
 * Do searching on the local_set_L.
 * local_set_L is already sorted; is_visited is already set up.
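 * Only the slice [set_L_start, set_L_start + set_L_size) of set_L is touched,
 * which is what lets callers run subsearches on disjoint slices of one array.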
 */
inline void Searching::subsearch_with_top_m(
        const idi value_M_max,
        const idi query_id,
        const idi local_L,
        std::vector<Candidate> &set_L,
        const idi set_L_start,
        idi &set_L_size,
        std::vector<idi> &local_top_m_candidates,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &local_count_distance_computation)
{
    const dataf *query_data = queries_load_ + query_id * dimension_;
    idi k = 0; // Index of first unchecked candidate.
    idi iter = 0;
    idi M = 1; // value of M

    while (k < local_L) {
        ++iter;
        subsearch_top_m_for_one_iteration(
                iter,
                k,
                M,
                query_id,
                query_data,
                local_L,
                set_L,
                set_L_start,
                set_L_size,
                local_top_m_candidates,
                is_visited,
                local_count_distance_computation);
        {// Scale M
            if (M < value_M_max) {
                M <<= 1;
            } else {
                M = value_M_max;
            }
        }
    }
}

/*
 * 7/6/2020-23:17
 * Subsearch only 1 iteration using top-m
 */
inline void Searching::subsearch_top_m_for_one_iteration(
        const idi iter,
        idi &k_uc,
        const idi value_M,
        const idi query_id,
        const dataf *query_data,
        const idi L,
        std::vector<Candidate> &set_L,
        const idi set_L_start,
        idi &set_L_size,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited,
        uint64_t &count_distance_computation)
{
    // Select M candidates
    idi top_m_candidates_end = 0;
    idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
    for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
        idi index_set_L = c_i + set_L_start;
        if (set_L[index_set_L].is_checked_) {
            continue;
        }
        last_k = c_i; // Record the location of the last candidate selected.
        set_L[index_set_L].is_checked_ = true;
        top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
    }

    idi nk = L;
    // Push M candidates' neighbors into the queue.
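    // Expansion: for every unvisited neighbor of the selected candidates,
    // compute its distance and insert it if it beats the current worst
    // (L-th) element of this queue slice; nk records the smallest position
    // that changed so the caller knows where to resume scanning.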
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[set_L_size - 1 + set_L_start].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = add_into_queue( set_L, set_L_start, set_L_size, L, cand); if (r < nk) { nk = r; } } } // top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } // {//test // for (idi l_i = 0; l_i < set_L_size; ++l_i) { // L_ids_.push_back(set_L[set_L_start + l_i].id_); // } // std::sort(L_ids_.begin(), L_ids_.end()); // std::sort(M_ids_.begin(), M_ids_.end()); // for (idi m_i = 0; m_i < M_ids_.size(); ++m_i) { // printf("query_id: %u " // "iter: %u " // "M[%u]: " // "%u\n", // query_id, // iter, // m_i, // M_ids_[m_i]); // } // M_ids_.clear(); // for (idi l_i = 0; l_i < L_ids_.size(); ++l_i) { // printf("query_id: %u " // "iter: %u " // "L[%u]: " // "%u\n", // query_id, // iter, // l_i, // L_ids_[l_i]); // } // L_ids_.clear(); // } } ///* // * One more parameter for distance bound // */ //inline void Searching::subsearch_top_m_for_one_iteration_lth( // const distf bound_lth, // const idi iter, // idi &k_uc, // const idi value_M, // const idi query_id, // const dataf *query_data, // const idi L, // std::vector<Candidate> &set_L, // const idi set_L_start, // idi &set_L_size, // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited, // uint64_t &count_distance_computation) //{ // // Select M candidates // idi top_m_candidates_end = 0; // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) { // idi index_set_L = c_i + set_L_start; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > bound_lth) { // continue; // } // // Candidate cand(nb_id, dist, false); // idi r = add_into_queue( // set_L, // set_L_start, // set_L_size, // L, // cand); // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k_uc = nk; // } else { // k_uc = last_k + 1; // } //} /* * 7/24/2020-10:53 * Subsearch for one iteration, with the global L-th value as the bound, * and the top_m_position indicates the bound for local top-M vertices. */ inline void Searching::subsearch_top_m_for_one_iteration_lth_mth( const distf bound_lth, // const idi top_m_position, const idi iter, idi &k_uc, const idi local_m_count, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation, double &time_pick_top_m, uint64_t &count_add_to_queue, double &time_distance_computation, double &time_add_to_queue) { // {//test // printf("query_id: %u " // "iter: %u " // "tid: %u \n", // query_id, // iter, // omp_get_thread_num()); // } // Select M candidates idi top_m_candidates_end = 0; idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k_uc; c_i < top_m_position; ++c_i) { time_pick_top_m -= WallTimer::get_time_mark(); for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < local_m_count; ++c_i) { idi index_set_L = c_i + set_L_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // {//test // M_ids_.push_back(set_L[index_set_L].id_); // } } time_pick_top_m += WallTimer::get_time_mark(); idi nk = L; // Push M candidates' neighbors into the queue. 
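    // Expansion with fine-grained instrumentation: the pick/distance/queue
    // phases are bracketed by WallTimer::get_time_mark() calls so the caller
    // can attribute time per phase. Note that bound_lth is effectively unused
    // in this edition: the global-bound test is commented out below, and
    // pruning falls back to this queue's own worst (set_L_size - 1) distance.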
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { time_pick_top_m -= WallTimer::get_time_mark(); idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } time_pick_top_m += WallTimer::get_time_mark(); for (idi e_i = 0; e_i < out_degree; ++e_i) { time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[set_L_start + set_L_size - 1].distance_) { // if (dist > bound_lth) { continue; } ++count_add_to_queue; Candidate cand(nb_id, dist, false); // time_pick_top_m -= WallTimer::get_time_mark(); time_add_to_queue -= WallTimer::get_time_mark(); idi r = add_into_queue( set_L, set_L_start, set_L_size, L, cand); if (r < nk) { nk = r; } time_add_to_queue += WallTimer::get_time_mark(); // time_pick_top_m += WallTimer::get_time_mark(); } } if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } } /* * 7/26/2020-15:41 * L-th and M-th Selection. * Seq-Par Phases: when M is 1 and 2, do sequential searching; * When M is equal and larger than 4, do parallel searching. * It's for load-balance issue. */ inline void Searching::para_search_with_top_m_subsearch_v3( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts, std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited) { time_initialization_ -= WallTimer::get_time_mark(); uint64_t tmp_count_computation = 0; {// Initialization // is_visited flag array //#pragma omp parallel for // Cannot use OMP for bit array is_visited! for (idi c_i = 0; c_i < global_L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < global_L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi id_i = 0; id_i < global_L; ++id_i) { idi v_id = init_ids[id_i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. 
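            // Layout note, inferred from the pointer arithmetic used
            // throughout this file: each vertex record in opt_nsg_graph_ is
            // vertex_bytes_ wide; the first data_bytes_ hold the precomputed
            // norm followed by the vector coordinates (so *v_data++ reads the
            // norm and leaves v_data at the coordinates), and the remainder
            // holds the out-degree followed by the adjacency list.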
} local_queues_sizes[0] = global_L; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; std::sort(set_L.begin(), set_L.begin() + global_L); } time_initialization_ += WallTimer::get_time_mark(); // Searching if (num_threads_ == 1) { // Single threads // std::sort( // set_L.begin(), // set_L.end()); subsearch_with_top_m( local_M_max, query_id, local_L, set_L, 0, local_queues_sizes[0], top_m_candidates_list[0], is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; } else { // Multiple threads const dataf *query_data = queries_load_ + query_id * dimension_; const idi num_queues = num_threads_; idi local_M = 1; idi iter = 0; std::vector<idi> ks(num_queues, 0); time_sequential_phase_ -= WallTimer::get_time_mark(); {// Sequential Search for M = 1, 2. idi &k = ks[0]; while (k < global_L && local_M < local_M_middle) { ++iter; subsearch_top_m_for_one_iteration( iter, k, local_M, query_id, query_data, global_L, set_L, 0, local_queues_sizes[0], top_m_candidates_list[0], is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; {// Double M if (local_M < local_M_max) { local_M <<= 1; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); distf bound_lth = set_L[global_L - 1].distance_; {// Parallel Search for M >= 4, or local_M_middle time_assign_s_ -=WallTimer::get_time_mark(); {// Assign elements from Queue[0] to others idi dst_i = 1; for (idi e_i = 1; e_i < global_L; ++e_i) { idi dest_sub = e_i % num_queues; if (0 == dest_sub) { set_L[dst_i++] = set_L[e_i]; } else { set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; } } local_queues_sizes[0] = dst_i; } std::fill(ks.begin(), ks.end(), 0); selecting_unchecked_top_M_seq( query_id, iter, set_L, ks, local_M, num_queues, local_queues_starts, local_queues_sizes, local_m_counts); time_assign_s_ +=WallTimer::get_time_mark(); double tmp_time_pick_top_m = 0; uint64_t tmp_count_add_to_queue = 0; uint8_t not_finished = 1; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0; while (true) { time_expand_ -= WallTimer::get_time_mark(); not_finished = 0; ++iter; #pragma omp parallel for reduction(+ : tmp_count_computation) \ reduction(+ : tmp_time_pick_top_m) \ reduction(+ : tmp_count_add_to_queue) \ reduction(+ : tmp_time_distance_computation) \ reduction(+ : tmp_time_add_to_queue) for (idi q_i = 0; q_i < num_queues; ++q_i) { tmp_time_pick_top_m -= WallTimer::get_time_mark(); idi L_value = q_i == 0 ? 
                                  global_L : local_L;
                    idi &k = ks[q_i];
                    idi &local_queue_size = local_queues_sizes[q_i];
                    auto &local_top_m_candidates = top_m_candidates_list[q_i];
                    idi local_m_count = local_m_counts[q_i];
//                    if (local_M < num_queues && !local_m_count) {
//                        local_m_count = 1;
//                    }
                    tmp_time_pick_top_m += WallTimer::get_time_mark();
                    if (!local_m_count) {
                        continue;
                    }
                    not_finished = 1;
                    const idi local_queue_start = local_queues_starts[q_i];

                    subsearch_top_m_for_one_iteration_lth_mth(
                            bound_lth,
                            iter,
                            k,
                            local_m_count,
                            query_id,
                            query_data,
                            L_value,
                            set_L,
                            local_queue_start,
                            local_queue_size,
                            local_top_m_candidates,
                            is_visited,
                            tmp_count_computation,
                            tmp_time_pick_top_m,
                            tmp_count_add_to_queue,
                            tmp_time_distance_computation,
                            tmp_time_add_to_queue);
                }
                time_add_to_queue_ += tmp_time_add_to_queue;
                tmp_time_add_to_queue = 0;
                time_distance_computation_ += tmp_time_distance_computation;
                tmp_time_distance_computation = 0;
                count_add_to_queue_ += tmp_count_add_to_queue;
                tmp_count_add_to_queue = 0;
                time_pick_top_m_ += tmp_time_pick_top_m;
                tmp_time_pick_top_m = 0;
                count_distance_computation_ += tmp_count_computation;
                tmp_count_computation = 0;
                time_expand_ += WallTimer::get_time_mark();
                if (!not_finished) {
                    break;
                }
                {// Scale M
                    if (local_M < local_M_max) {
                        local_M <<= 1;
                    }
//                    else {
//                        local_M = value_M_max;
//                    }
                }
                time_select_ -= WallTimer::get_time_mark();
#pragma omp parallel sections
                {
#pragma omp section
                    {// Selecting the L-th bound and updating local_queues_sizes
//                        time_select_L_ -= WallTimer::get_time_mark();
                        bound_lth = selecting_top_L_seq(
                                set_L,
                                global_L,
//                                local_L,
                                num_queues,
                                local_queues_starts,
                                local_queues_sizes);
//                        time_select_L_ += WallTimer::get_time_mark();
                    }
#pragma omp section
                    {
//                        time_select_M_ -= WallTimer::get_time_mark();
                        selecting_unchecked_top_M_seq(
                                query_id,
                                iter,
                                set_L,
                                ks,
                                local_M,
                                num_queues,
                                local_queues_starts,
                                local_queues_sizes,
                                local_m_counts);
//                        time_select_M_ += WallTimer::get_time_mark();
                    }
                }
                time_select_ += WallTimer::get_time_mark();
//                {//test
//                    printf("query_id: %u "
//                           "iter: %u",
//                           query_id,
//                           iter);
//                    printf(" local_queues_sizes:");
//                    for (idi i = 0; i < num_queues; ++i) {
//                        printf(" %u", local_queues_sizes[i]);
//                    }
//                    printf(" local_m_counts:");
//                    for (idi i = 0; i < num_queues; ++i) {
//                        printf(" %u", local_m_counts[i]);
//                    }
//                    printf(" ks:");
//                    for (idi i = 0; i < num_queues; ++i) {
//                        printf(" %u", ks[i]);
//                    }
//                    printf("\n");
//                }
            }
        }
        time_parallel_phase_ += WallTimer::get_time_mark();
    }

//    time_merge_ -= WallTimer::get_time_mark();
    time_ending_ -= WallTimer::get_time_mark();
    {// Return the results to set_K
        std::vector<idi> pointer(num_threads_, 0);
        // get the first
        distf min_dist = FLT_MAX;
        idi min_q_i;
        idi min_id;
        idi min_sub;
        idi last_id;
        for (int q_i = 0; q_i < num_threads_; ++q_i) {
            if (pointer[q_i] >= local_queues_sizes[q_i]) {
                continue;
            }
            idi sub = pointer[q_i] + local_queues_starts[q_i];
            distf tmp_dist = set_L[sub].distance_;
            idi tmp_id = set_L[sub].id_;
            if (tmp_dist < min_dist) {
                min_dist = tmp_dist;
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            } else if (tmp_dist == min_dist && tmp_id < min_id) {
                min_id = tmp_id;
                min_q_i = q_i;
                min_sub = sub;
            }
        }
        set_K[0] = set_L[min_sub].id_;
//        {//test
//            printf("query_id: %u "
//                   "[%u]: "
//                   "(%u, %f)\n",
//                   query_id,
//                   0,
//                   set_L[min_sub].id_, set_L[min_sub].distance_);
//        }
        ++pointer[min_q_i];
        last_id = set_K[0];
        bool is_finished = false;
        idi k_i = 1;
        while (k_i < K && !is_finished) {
            is_finished = true;
            min_dist = FLT_MAX;
            for (int q_i = 0; q_i < num_threads_; ++q_i) {
                const idi local_queue_size =
local_queues_sizes[q_i]; idi sub = pointer[q_i] + local_queues_starts[q_i]; while (pointer[q_i] < local_queue_size && set_L[sub].id_ == last_id) { ++pointer[q_i]; ++sub; } if (pointer[q_i] >= local_queue_size) { continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[k_i] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // k_i, // set_L[min_sub].id_, set_L[min_sub].distance_); // } ++pointer[min_q_i]; ++k_i; } } // time_merge_ += WallTimer::get_time_mark(); {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); } time_ending_ += WallTimer::get_time_mark(); // {//test // if (3 == query_id) { // exit(1); // } // } } /* * 7/27/2020-15:33 * Same with v3, but gather top-m vertices together */ inline void Searching::para_search_with_top_m_subsearch_v4( const idi local_M_max, const idi local_M_middle, const idi query_id, const idi K, const idi global_L, const idi local_L, // const idi total_L, // const idi init_queue_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, std::vector<idi> &local_m_counts, std::vector<idi> &top_m_candidates, // std::vector< std::vector<idi> > &top_m_candidates_list, boost::dynamic_bitset<> &is_visited) { time_initialization_ -= WallTimer::get_time_mark(); uint64_t tmp_count_computation = 0; {// Initialization // is_visited flag array //#pragma omp parallel for // Cannot use OMP for bit array is_visited! for (idi c_i = 0; c_i < global_L; ++c_i) { is_visited[init_ids[c_i]] = 1; } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < global_L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi id_i = 0; id_i < global_L; ++id_i) { idi v_id = init_ids[id_i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[id_i] = Candidate(v_id, dist, false); // False means not checked. } local_queues_sizes[0] = global_L; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; std::sort(set_L.begin(), set_L.begin() + global_L); } time_initialization_ += WallTimer::get_time_mark(); // Searching if (num_threads_ == 1) { // Single threads // std::sort( // set_L.begin(), // set_L.end()); subsearch_with_top_m( local_M_max, query_id, local_L, set_L, 0, local_queues_sizes[0], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; } else { // Multiple threads const dataf *query_data = queries_load_ + query_id * dimension_; const idi num_queues = num_threads_; idi local_M = 1; idi iter = 0; // std::vector<idi> ks(num_queues, 0); time_sequential_phase_ -= WallTimer::get_time_mark(); {// Sequential Search for M = 1, 2. 
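            // Warm-up schedule: search sequentially on the single global
            // queue while M is small (local_M doubles each iteration: 1, 2,
            // ...), then fall through to the parallel phase once local_M
            // reaches local_M_middle. Expanding only one or two candidates
            // would not amortize the threading overhead, which is the
            // load-balance rationale stated in the v3 header comment above.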
idi k = 0; // idi &k = ks[0]; while (k < global_L && local_M < local_M_middle) { ++iter; subsearch_top_m_for_one_iteration( iter, k, local_M, query_id, query_data, global_L, set_L, 0, local_queues_sizes[0], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; {// Double M if (local_M < local_M_max) { local_M <<= 1; } } } } time_sequential_phase_ += WallTimer::get_time_mark(); time_parallel_phase_ -= WallTimer::get_time_mark(); distf bound_lth = set_L[global_L - 1].distance_; {// Parallel Search for M >= 4, or local_M_middle time_assign_s_ -=WallTimer::get_time_mark(); {// Assign elements from Queue[0] to others idi dst_i = 1; for (idi e_i = 1; e_i < global_L; ++e_i) { idi dest_sub = e_i % num_queues; if (0 == dest_sub) { set_L[dst_i++] = set_L[e_i]; } else { set_L[local_queues_starts[dest_sub] + local_queues_sizes[dest_sub]++] = set_L[e_i]; } } local_queues_sizes[0] = dst_i; } // std::fill(ks.begin(), ks.end(), 0); idi top_m_candidates_size = 0; // selecting_unchecked_top_M_seq( // query_id, // iter, // set_L, // ks, // local_M, // num_queues, // local_queues_starts, // local_queues_sizes, // local_m_counts); time_assign_s_ +=WallTimer::get_time_mark(); std::vector<idi> ks(num_queues, 0); std::vector<idi> nks(num_queues); std::vector<idi> bound_ks(num_queues); double tmp_time_pick_top_m = 0; uint64_t tmp_count_add_to_queue = 0; uint8_t not_finished = 1; double tmp_time_distance_computation = 0; double tmp_time_add_to_queue = 0; while (true) { time_expand_ -= WallTimer::get_time_mark(); not_finished = 0; ++iter; // Gather top-M vertices time_pick_top_m_ -= WallTimer::get_time_mark(); gather_unchecked_top_M_seq( query_id, iter, set_L, ks, local_M, num_queues, local_queues_starts, local_queues_sizes, top_m_candidates, top_m_candidates_size, bound_ks); time_pick_top_m_ += WallTimer::get_time_mark(); if (!top_m_candidates_size) { time_expand_ += WallTimer::get_time_mark(); break; } std::fill(nks.begin(), nks.end(), global_L); // Expand top-M vertices #pragma omp parallel for schedule(static, 1) \ reduction(+ : tmp_count_computation) \ reduction(+ : tmp_count_add_to_queue) \ reduction(+ : tmp_time_distance_computation) \ reduction(+ : tmp_time_pick_top_m) \ reduction(+ : tmp_time_add_to_queue) for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) { tmp_time_pick_top_m -= WallTimer::get_time_mark(); idi tid = omp_get_thread_num(); const idi set_L_start = local_queues_starts[tid]; idi &set_L_size = local_queues_sizes[tid]; idi &nk = nks[tid]; idi L_value = tid == 0 ? 
global_L : local_L; idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } tmp_time_pick_top_m += WallTimer::get_time_mark(); // Expand cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { tmp_time_distance_computation -= WallTimer::get_time_mark(); idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { tmp_time_distance_computation += WallTimer::get_time_mark(); continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); tmp_time_distance_computation += WallTimer::get_time_mark(); if (dist > set_L[set_L_start + set_L_size - 1].distance_) { // if (dist > bound_lth) { continue; } ++tmp_count_add_to_queue; Candidate cand(nb_id, dist, false); tmp_time_add_to_queue -= WallTimer::get_time_mark(); idi r = add_into_queue( set_L, set_L_start, set_L_size, L_value, cand); if (r < nk) { nk = r; } tmp_time_add_to_queue += WallTimer::get_time_mark(); } } top_m_candidates_size = 0; time_add_to_queue_ += tmp_time_add_to_queue; tmp_time_add_to_queue = 0; time_distance_computation_ += tmp_time_distance_computation; tmp_time_distance_computation = 0; count_add_to_queue_ += tmp_count_add_to_queue; tmp_count_add_to_queue = 0; time_pick_top_m_ += tmp_time_pick_top_m; tmp_time_pick_top_m = 0; count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; for (idi q_i = 0; q_i < num_queues; ++q_i) { if (nks[q_i] < bound_ks[q_i]) { ks[q_i] = nks[q_i]; } else { ks[q_i] = bound_ks[q_i]; } } time_expand_ += WallTimer::get_time_mark(); time_select_ -= WallTimer::get_time_mark(); {// Select L-th bound_lth = selecting_top_L_seq( set_L, global_L, num_queues, local_queues_starts, local_queues_sizes); } time_select_ += WallTimer::get_time_mark(); {// Scale M if (local_M < local_M_max) { local_M <<= 1; } } // {//test // printf("query_id: %u " // "iter: %u", // query_id, // iter); // printf(" local_queues_sizes:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", local_queues_sizes[i]); // } // printf(" local_m_counts:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", local_m_counts[i]); // } // printf(" ks:"); // for (idi i = 0; i < num_queues; ++i) { // printf(" %u", ks[i]); // } // printf("\n"); // } } } time_parallel_phase_ += WallTimer::get_time_mark(); } // time_merge_ -= WallTimer::get_time_mark(); time_ending_ -= WallTimer::get_time_mark(); {// Return the results to set_K std::vector<idi> pointer(num_threads_, 0); // get the first distf min_dist = FLT_MAX; idi min_q_i; idi min_id; idi min_sub; idi last_id; for (int q_i = 0; q_i < num_threads_; ++q_i) { if (pointer[q_i] >= local_queues_sizes[q_i]) { continue; } idi sub = pointer[q_i] + local_queues_starts[q_i]; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[0] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // 0, // set_L[min_sub].id_, 
set_L[min_sub].distance_); // } ++pointer[min_q_i]; last_id = set_K[0]; bool is_finished = false; idi k_i = 1; while (k_i < K && !is_finished) { is_finished = true; min_dist = FLT_MAX; for (int q_i = 0; q_i < num_threads_; ++q_i) { const idi local_queue_size = local_queues_sizes[q_i]; idi sub = pointer[q_i] + local_queues_starts[q_i]; while (pointer[q_i] < local_queue_size && set_L[sub].id_ == last_id) { ++pointer[q_i]; ++sub; } if (pointer[q_i] >= local_queue_size) { continue; } is_finished = false; distf tmp_dist = set_L[sub].distance_; idi tmp_id = set_L[sub].id_; if (tmp_dist < min_dist) { min_dist = tmp_dist; min_id = tmp_id; min_q_i = q_i; min_sub = sub; } else if (tmp_dist == min_dist && tmp_id < min_id) { min_id = tmp_id; min_q_i = q_i; min_sub = sub; } } set_K[k_i] = set_L[min_sub].id_; // {//test // printf("query_id: %u " // "[%u]: " // "(%u, %f)\n", // query_id, // k_i, // set_L[min_sub].id_, set_L[min_sub].distance_); // } ++pointer[min_q_i]; ++k_i; } } // time_merge_ += WallTimer::get_time_mark(); {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); std::fill(local_queues_sizes.begin() + 1, local_queues_sizes.end(), 0); } time_ending_ += WallTimer::get_time_mark(); // {//test // if (3 == query_id) { // exit(1); // } // } } /* * 6/27/2020-12:33 * Do searching on the local_set_L * local_set_L is already sorted * is_visited is already set up. */ inline void Searching::subsearch_for_simple_search( const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi base_set_L, idi &set_L_end, // std::vector<uint8_t> &is_visited, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation) { const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi iter = 0; // idi M = 1; // value of M while (k < local_L) { ++iter; // {//test // printf("query_id: %u " // "iter: %u\n", // query_id, // iter); // } // Select the top-1 unchecked candidate idi top_1; idi last_k = local_L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < set_L_end; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } top_1 = set_L[index_set_L].id_; last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; // local_top_m_candidates[local_top_m_candidates_end++] = set_L[index_set_L].id_; break; } if (last_k == local_L) { break; } idi nk = local_L; // Push top-1' neighbors into the queue. 
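        // Greedy expansion: this routine is the M = 1 special case of the
        // top-M subsearches above. The single selected candidate is expanded
        // exactly like a top-M candidate, and nk again records the earliest
        // queue position that changed so k can be reset for the next
        // iteration.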
            idi cand_id = top_1;
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                { // Sequential edition
                    if (is_visited[nb_id]) {
                        continue;
                    }
                    is_visited[nb_id] = 1;
                }
//                {// Critical edition
//                    if (!AtomicOps::CAS(is_visited.data() + nb_id,
//                                        static_cast<uint8_t>(0),
//                                        static_cast<uint8_t>(1))) {
//                        continue;
//                    }
//                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++local_count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//                {
//                    if (0 == query_id
//                        && (785802 == nb_id
//                            || 180955 == nb_id
//                            || 240996 == nb_id
//                            || 813701 == nb_id
//                            || 708177 == nb_id
//                            || 87578 == nb_id
//                            || 561813 == nb_id
//                            || 701258 == nb_id
//                            || 872728 == nb_id)) {
////                        && 180955 == nb_id) {
//                        printf("parent: %u "
//                               "nb_id: %u "
//                               "dist: %f "
//                               "base_set_L: %u "
//                               "set_L_end: %u\n",
//                               cand_id,
//                               nb_id,
//                               dist,
//                               base_set_L,
//                               set_L_end);
//                    }
//                }
                if (dist > set_L[set_L_end - 1 + base_set_L].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                // Thread 0 maintains the "global" queue
                idi r = add_into_queue(
                        set_L,
                        base_set_L,
                        set_L_end,
                        local_L,
                        cand);
                if (r < nk) {
                    nk = r;
                }
            }

            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
        }
    }

/*
 * 6/27/2020-12:26
 * Is it good to use subsearch by every thread itself?
 */
inline void Searching::para_simple_search_subsearch(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
//        std::vector<uint8_t> &is_visited)
        boost::dynamic_bitset<> &is_visited)
{
    uint64_t tmp_count_computation = 0;
    {// Initialization
        // is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }

        const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
        for (idi v_i = 0; v_i < L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (unsigned i = 0; i < L; i++) {
            unsigned v_id = init_ids[i];
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[i] = Candidate(v_id, dist, false); // False means not checked.
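            // Note: the global sort of set_L is deliberately skipped here;
            // each branch below sorts only the range it owns (the whole queue
            // in the single-thread path, one contiguous segment per thread in
            // the parallel path).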
} count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort( // set_L.begin(), // set_L.begin() + L); } idi queue_end = L; // Searching if (num_threads_ == 1) { // Single threads std::sort( set_L.begin(), set_L.end()); subsearch_for_simple_search( query_id, L, set_L, 0, queue_end, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; // { //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("start: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // idi half_length = queue_end / 2; // std::sort( // set_L.begin(), // set_L.begin() + half_length); //// {//test //// for (idi i = 0; i < half_length; ++i) { //// printf("sorted: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // subsearch_for_simple_search( // query_id, // half_length, // local_L // set_L, // 0, // base_set_L // half_length, // set_L_end // is_visited, // tmp_count_computation); // //// {//test //// for (idi i = 0; i < half_length; ++i) { //// printf("subsearched: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // std::sort( // set_L.begin() + half_length, // set_L.end()); // //// {//test //// for (idi i = half_length; i < queue_end; ++i) { //// printf("sorted: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // // subsearch_for_simple_search( // query_id, // half_length, // local_L // set_L, // half_length, // base_set_L // half_length, // set_L_end // is_visited, // tmp_count_computation); //// {//test //// for (idi i = half_length; i < queue_end; ++i) { //// printf("subsearched: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("explored: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // count_distance_computation_ += tmp_count_computation; // // std::vector <Candidate> tmp_set_L(L); // std::merge(set_L.begin(), set_L.begin() + half_length, // set_L.begin() + half_length, set_L.end(), // tmp_set_L.begin()); // std::copy(tmp_set_L.begin(), tmp_set_L.end(), set_L.begin()); //// {//test //// for (idi i = 0; i < queue_end; ++i) { //// printf("merged: " //// "query_id: %u " //// "set_L[%u]: " //// "(%u %f)\n", //// query_id, //// i, //// set_L[i].id_, set_L[i].distance_); //// } //// } // } } else { // Multiple threads const idi num_queues = num_threads_; const idi local_queue_length = (L - 1) / num_queues + 1; // Parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi q_i = 0; q_i < num_queues; ++q_i) { idi local_queue_base = q_i * local_queue_length; if (local_queue_base >= L) { continue; } idi local_queue_end = local_queue_length; if (local_queue_base + local_queue_end > L) { local_queue_end = L - local_queue_base; } std::sort( set_L.begin() + local_queue_base, set_L.begin() + local_queue_base + local_queue_end); subsearch_for_simple_search( query_id, local_queue_end, // local_L set_L, local_queue_base, // base_set_L local_queue_end, // set_L_end is_visited, tmp_count_computation); } count_distance_computation_ += 
tmp_count_computation; // Merge // time_merge_ -= WallTimer::get_time_mark(); merge_in_set_L( set_L, L, num_queues, local_queue_length); // time_merge_ += WallTimer::get_time_mark(); } {// Return the results to set_K // How to deal with duplicate? idi last_id = set_L[0].id_; set_K[0] = last_id; idi k_i = 1; idi l_i = 1; while (k_i < K && l_i < L) { if (last_id == set_L[l_i].id_) { ++l_i; continue; } last_id = set_L[l_i++].id_; set_K[k_i++] = last_id; } //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; //// set_K[k_i] = set_L[k_i].id_; // } } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); } // {//test // if (0 == query_id) { // exit(1); // } // } } ///* // * 6/22/2020-09:38 // * A synchronized last element as the sentinel // */ //inline void Searching::para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited) //{ //// const idi base_set_L = (num_threads_ - 1) * local_queue_length; // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_ - 1] = L; // // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. ////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. 
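//                // NOTE: queue discipline in this disabled edition: thread 0
//                // writes into the shared "global" queue of capacity L at
//                // base_set_L, while every other thread collects into its own
//                // local queue of capacity local_queue_length; the local
//                // queues are merged back into the global one once per
//                // iteration further below.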
// if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Local queues' ends //// printf("query%u:iter: %u", query_id, tmp_count); // idi total_elements = 0; // for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) { // total_elements += local_queues_ends[i_t]; // } // number_local_elements_ += total_elements; //// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]); //// for (int i_t = 0; i_t < num_threads_; ++i_t) { //// printf(" [%u]: %u", i_t, local_queues_ends[i_t]); //// } //// printf("\n"); // } // //// // Merge. Merge all queues in parallel. // { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); // if (r < nk) { // nk = r; // } // } // time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// if (0 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/7/2020-16:55 // * Use 1 threads to scale M until the value_M_middle. // * Then use multiple threads. // * Except for Thread 0, other threads are collectors. They collect, but do not merge. // * Only merge once after Thread 0 stops. // */ //inline void Searching::para_search_with_top_m_merge_queues_collectors( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. 
////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi chunk_size; // if (num_threads_ <= top_m_candidates_end) { // chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1; // } else { // chunk_size = 1; // } // idi nk = L; // // Push M candidates' neighbors into the queue. 
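//            // NOTE: chunk_size above is ceil(top_m_candidates_end /
//            // num_threads_), so schedule(static, chunk_size) hands each
//            // thread at most one contiguous block of candidates; thread 0,
//            // the only writer of the global queue, always takes the first
//            // block.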
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) ////#pragma omp parallel for reduction(+ : tmp_count_computation) //#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); //// { //// if (c_i < chunk_size && tid != 0) { //// printf("query_id: %u " //// "tmp_count: %u " //// "chunk_size: %u " //// "c_i: %u " //// "tid: %u\n", //// query_id, //// tmp_count, //// chunk_size, //// c_i, //// tid); //// } //// } // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // ////// // Merge. Merge all queues in parallel. //// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// idi r = merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// if (r < nk) { //// nk = r; //// } //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // //// // Merge only once after Master Thread stops. 
//// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // } // // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/8/2020-16:39 // * Selecting rather than merging // */ //inline void Searching::para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { //// while (k < L) { // while (true) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// // Select M candidates //// idi last_k = L; ////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. //// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { //// idi index_set_L = c_i + base_set_L; //// if (set_L[index_set_L].is_checked_) { //// continue; //// } //// last_k = c_i; // Record the location of the last candidate selected. //// set_L[index_set_L].is_checked_ = true; //// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; //// } // // // Select M candidates // { // idi traverse_count = 0; // idi bound_sub = L; // This is not always true! 
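//            // NOTE: selection without merging: the doubly nested loop below
//            // scans queue positions (sub) across all local queues in
//            // lockstep, picking unchecked candidates in roughly global rank
//            // order until M are gathered; this replaces the per-iteration
//            // merge of the other editions.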
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) { // for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) { // if (sub >= local_queues_ends[tid]) { // continue; // } // idi index_set_L = tid * local_queue_length + sub; // if (set_L[index_set_L].is_checked_) { // continue; // } // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // } // // if (0 == top_m_candidates_end) { // break; // } // } // //// idi nk = L; // // Push M candidates' neighbors into the queue. ////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue //// idi r = // add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); //// if (r < nk) { //// nk = r; //// } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. 
// { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { //// idi r = merge_all_queues_queue_base( //// set_L, //// local_queues_ends, //// queue_base, //// real_threads, //// local_queue_length, //// L); //// idi r = // merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); //// if (r < nk) { //// nk = r; //// } // } // time_merge_ += WallTimer::get_time_mark(); // } //// if (nk <= last_k) { //// k = nk; //// } else { //// k = last_k + 1; //// } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // ////#pragma omp parallel for //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i + base_set_L].id_; ////// set_K[k_i] = set_L[k_i].id_; //// } // // { // idi k_i = 0; // idi bound_sub = K / num_threads_; // for (idi sub = 0; sub < bound_sub; ++sub) { // for (int tid = 0; tid < num_threads_; ++tid) { // idi index_set_L = tid * local_queue_length + sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // idi remain = K - k_i; // if (remain) { // for (int tid = 0; tid < remain; ++tid) { // idi index_set_L = tid * local_queue_length + bound_sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
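The commented-out experiments above all share one top-M best-first skeleton: pick up to M unchecked candidates from the sorted pool, expand their out-edges, re-insert improving neighbors, then double M up to value_M_max. Below is a minimal single-threaded sketch of that skeleton, for reference only; it is not part of the original header. Candidate, neighbors, and distance are simplified stand-ins for the set_L entries, the opt_nsg_graph_ walk, and compute_distance_with_norm, and the pool is assumed pre-sized, sorted, and holding at least L entries.

#include <cstdint>
#include <vector>
#include <algorithm>

struct Candidate {            // simplified stand-in for a set_L entry
    uint32_t id_;
    float distance_;
    bool is_checked_;
};

// Insert cand into the sorted pool of capacity L, dropping the current worst.
// Returns the insertion position, or L if cand is too far to be kept.
inline std::size_t add_into_queue(std::vector<Candidate> &pool, std::size_t L,
                                  const Candidate &cand) {
    auto cmp = [](const Candidate &a, const Candidate &b) { return a.distance_ < b.distance_; };
    const std::size_t r = std::lower_bound(pool.begin(), pool.begin() + L, cand, cmp) - pool.begin();
    if (r >= L) return L;
    std::copy_backward(pool.begin() + r, pool.begin() + (L - 1), pool.begin() + L); // shift worse entries down
    pool[r] = cand;
    return r;
}

// One query: expand up to M unchecked candidates per round, doubling M each
// round as the "Scale M" block above does.
template <class NeighborsFn, class DistanceFn>
void top_m_search(std::vector<Candidate> &set_L, std::size_t L,
                  std::size_t M, std::size_t value_M_max,
                  NeighborsFn neighbors, DistanceFn distance,
                  std::vector<char> &is_visited) {   // is_visited sized to the vertex count
    std::size_t k = 0;
    while (k < L) {
        std::vector<uint32_t> top_m;
        std::size_t last_k = L;
        for (std::size_t c_i = k; c_i < L && top_m.size() < M; ++c_i) {
            if (set_L[c_i].is_checked_) continue;
            last_k = c_i;                       // location of the last candidate selected
            set_L[c_i].is_checked_ = true;
            top_m.push_back(set_L[c_i].id_);
        }
        if (top_m.empty()) break;
        std::size_t nk = L;                     // best position improved this round
        for (uint32_t cand_id : top_m) {
            for (uint32_t nb_id : neighbors(cand_id)) {
                if (is_visited[nb_id]) continue;
                is_visited[nb_id] = 1;
                const float dist = distance(nb_id);
                if (dist > set_L[L - 1].distance_) continue;  // worse than the current worst kept
                nk = std::min(nk, add_into_queue(set_L, L, Candidate{nb_id, dist, false}));
            }
        }
        k = (nk <= last_k) ? nk : last_k + 1;   // resume from the first improved slot
        M = std::min(M * 2, value_M_max);       // Scale M
    }
}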
three_step_v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2021 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_THREE_STEP_V_P_STRATEGY_H #define KRATOS_THREE_STEP_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h" #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include "v_p_strategy.h" #include <stdio.h> #include <math.h> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class ThreeStepVPStrategy : public VPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(ThreeStepVPStrategy); typedef VPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ ThreeStepVPStrategy(ModelPart &rModelPart, typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, double VelTol = 0.0001, double PresTol = 0.0001, int MaxPressureIterations = 1, // Only for predictor-corrector unsigned int TimeOrder = 2, unsigned int DomainSize = 2) : BaseType(rModelPart, pVelocityLinearSolver, pPressureLinearSolver, ReformDofSet, DomainSize), mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mReformDofSet(ReformDofSet) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. this->Check(); bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. 
// Additional Typedefs typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace> BaseType; //initializing fractional velocity solution step typedef Scheme<TSparseSpace, TDenseSpace> SchemeType; typename SchemeType::Pointer pScheme; typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>()); pScheme.swap(Temp); //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver)); /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */ this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel()); vel_build->SetCalculateReactionsFlag(false); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */ BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver)); this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel()); pressure_build->SetCalculateReactionsFlag(false); KRATOS_CATCH(""); } /// Destructor. virtual ~ThreeStepVPStrategy() {} int Check() override { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if (DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. 
Check that the application was correctly registered.", ""); ModelPart &rModelPart = BaseType::GetModelPart(); const auto &r_current_process_info = rModelPart.GetProcessInfo(); for (const auto &r_element : rModelPart.Elements()) { ierr = r_element.Check(r_current_process_info); if (ierr != 0) { break; } } return ierr; KRATOS_CATCH(""); } void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo) { KRATOS_TRY; if (mTimeOrder == 2) { //calculate the BDF coefficients double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; double Rho = OldDt / Dt; double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho); Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(3, false); BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant) BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant) BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant) } else if (mTimeOrder == 1) { double Dt = rCurrentProcessInfo[DELTA_TIME]; double TimeCoeff = 1.0 / Dt; Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(2, false); BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt) BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt) } KRATOS_CATCH(""); } bool SolveSolutionStep() override { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; bool converged = false; double NormV = 0; unsigned int maxNonLinearIterations = mMaxPressureIter; KRATOS_INFO("\nSolution with three_step_vp_strategy at t=") << currentTime << "s" << std::endl; bool momentumConverged = true; bool continuityConverged = false; this->SetBlockedAndIsolatedFlags(); // this->FreePressure(); for (unsigned int it = 0; it < maxNonLinearIterations; ++it) { KRATOS_INFO("\n ------------------- ITERATION ") << it << " ------------------- " << std::endl; // 1. Compute first-step velocity rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1); if (it == 0) { mpMomentumStrategy->InitializeSolutionStep(); // this->FixPressure(); } else { this->RecoverFractionalVelocity(); } momentumConverged = this->SolveFirstVelocitySystem(NormV); // 2. Pressure solution rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5); if (it == 0) { mpPressureStrategy->InitializeSolutionStep(); } continuityConverged = this->SolveContinuityIteration(); // 3. Compute end-of-step velocity this->CalculateEndOfStepVelocity(NormV); this->UpdateTopology(rModelPart, BaseType::GetEchoLevel()); if (continuityConverged && momentumConverged) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); converged = true; //double tensilStressSign = -1.0; // ComputeErrorL2Norm(tensilStressSign); this->UpdateStressStrain(); KRATOS_INFO("ThreeStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl; break; } else if (it == (maxNonLinearIterations - 1) && it != 0) { //double tensilStressSign = -1.0; // ComputeErrorL2Norm(tensilStressSign); this->UpdateStressStrain(); } } if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." 
<< std::endl; if (mReformDofSet) this->Clear(); return converged; } void FinalizeSolutionStep() override { } void InitializeSolutionStep() override { } void UpdateStressStrain() override { ModelPart &rModelPart = BaseType::GetModelPart(); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { itElem->InitializeSolutionStep(rCurrentProcessInfo); } } this->CalculateTemporalVariables(); } void CalculateTemporalVariables() override { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1); /* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */ if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID))) { UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity); } else if ((i)->Is(RIGID)) { array_1d<double, 3> Zeros(3, 0.0); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros; (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros; } else { (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0; if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION)) { array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration; (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME]; } } const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0; } else { double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0); double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1); double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0); CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval; CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval; CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval; } } } inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration, const array_1d<double, 3> &CurrentVelocity, array_1d<double, 3> &PreviousAcceleration, const array_1d<double, 3> &PreviousVelocity) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = 
rModelPart.GetProcessInfo(); double Dt = rCurrentProcessInfo[DELTA_TIME]; // noalias(CurrentAcceleration) = 2.0 * (CurrentVelocity - PreviousVelocity) / Dt - PreviousAcceleration; // 2nd order noalias(CurrentAcceleration) = (CurrentVelocity - PreviousVelocity) / Dt; // 1st order } void Clear() override { mpMomentumStrategy->Clear(); mpPressureStrategy->Clear(); } ///@} ///@name Access ///@{ void SetEchoLevel(int Level) override { BaseType::SetEchoLevel(Level); int StrategyLevel = Level > 0 ? Level - 1 : 0; mpMomentumStrategy->SetEchoLevel(StrategyLevel); mpPressureStrategy->SetEchoLevel(StrategyLevel); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "ThreeStepVPStrategy"; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream &rOStream) const override { rOStream << "ThreeStepVPStrategy"; } /// Print object's data. void PrintData(std::ostream &rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /// Calculate the coefficients for time iteration. /** * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME variables. */ bool SolveFirstVelocitySystem(double &NormV) { std::cout << "1. SolveFirstVelocitySystem " << std::endl; bool momentumConvergence = false; double NormDv = 0; // build momentum system and solve for fractional step velocity increment NormDv = mpMomentumStrategy->Solve(); // Check convergence momentumConvergence = this->CheckVelocityIncrementConvergence(NormDv, NormV); if (!momentumConvergence && BaseType::GetEchoLevel() > 0) std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl; return momentumConvergence; } bool SolveContinuityIteration() { std::cout << "2. SolveContinuityIteration " << std::endl; bool continuityConvergence = false; double NormDp = 0; NormDp = mpPressureStrategy->Solve(); continuityConvergence = CheckPressureIncrementConvergence(NormDp); // continuityConvergence = true; if (!continuityConvergence && BaseType::GetEchoLevel() > 0) std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl; return continuityConvergence; } void CalculateEndOfStepVelocity(const double NormV) { std::cout << "3. 
CalculateEndOfStepVelocity()" << std::endl; ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); const int n_elems = rModelPart.NumberOfElements(); array_1d<double, 3> Out = ZeroVector(3); VariableUtils().SetHistoricalVariableToZero(FRACT_VEL, rModelPart.Nodes()); VariableUtils().SetHistoricalVariableToZero(NODAL_VOLUME, rModelPart.Nodes()); #pragma omp parallel for for (int i_elem = 0; i_elem < n_elems; ++i_elem) { const auto it_elem = rModelPart.ElementsBegin() + i_elem; it_elem->Calculate(VELOCITY, Out, rModelPart.GetProcessInfo()); Element::GeometryType &geometry = it_elem->GetGeometry(); double elementalVolume = 0; if (mDomainSize == 2) { elementalVolume = geometry.Area() / 3.0; } else if (mDomainSize == 3) { elementalVolume = geometry.Volume() * 0.25; } // index = 0; unsigned int numNodes = geometry.size(); for (unsigned int i = 0; i < numNodes; i++) { double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += elementalVolume; } } rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL); if (mDomainSize > 2) { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; const double NodalVolume = it_node->FastGetSolutionStepValue(NODAL_VOLUME); double fractionalVelocity = 0; if (it_node->IsNot(ISOLATED)) { if (!it_node->IsFixed(VELOCITY_X)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_X); // VELOCITY_X stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalVolume; // here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_X) = fractionalVelocity; // now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } if (!it_node->IsFixed(VELOCITY_Y)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_Y); // VELOCITY_Y stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalVolume; // here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_Y) = fractionalVelocity; // now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } if (!it_node->IsFixed(VELOCITY_Z)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_Z); // VELOCITY_Z stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_Z) += it_node->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalVolume; // here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_Z) = fractionalVelocity; // now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } } } } else { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_VOLUME); double fractionalVelocity = 0; if (it_node->IsNot(ISOLATED)) { if (!it_node->IsFixed(VELOCITY_X)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_X); // VELOCITY_X stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea; // here FRACT_VEL stores the 
gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_X) = fractionalVelocity; // now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } if (!it_node->IsFixed(VELOCITY_Y)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_Y); // VELOCITY_Y stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; //here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_Y) = fractionalVelocity; //now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } } } } this->CheckVelocityConvergence(NormV); } void RecoverFractionalVelocity() { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL); if (mDomainSize > 2) { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; if (it_node->IsNot(ISOLATED)) { if (!it_node->IsFixed(VELOCITY_X)) { it_node->FastGetSolutionStepValue(VELOCITY_X) = it_node->FastGetSolutionStepValue(FRACT_VEL_X); } if (!it_node->IsFixed(VELOCITY_Y)) { it_node->FastGetSolutionStepValue(VELOCITY_Y) = it_node->FastGetSolutionStepValue(FRACT_VEL_Y); } if (!it_node->IsFixed(VELOCITY_Z)) { it_node->FastGetSolutionStepValue(VELOCITY_Z) = it_node->FastGetSolutionStepValue(FRACT_VEL_Z); } } } } else { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; if (it_node->IsNot(ISOLATED)) { if (!it_node->IsFixed(VELOCITY_X)) { it_node->FastGetSolutionStepValue(VELOCITY_X) = it_node->FastGetSolutionStepValue(FRACT_VEL_X); } if (!it_node->IsFixed(VELOCITY_Y)) { it_node->FastGetSolutionStepValue(VELOCITY_Y) = it_node->FastGetSolutionStepValue(FRACT_VEL_Y); } } } } } bool CheckVelocityIncrementConvergence(const double NormDv, double &NormV) { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); NormV = 0.00; double errorNormDv = 0; double temp_norm = NormV; #pragma omp parallel for reduction(+ : temp_norm) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < 3; ++d) { temp_norm += r_vel[d] * r_vel[d]; } } NormV = temp_norm; NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); const double zero_tol = 1.0e-12; errorNormDv = (NormV < zero_tol) ? NormDv : NormDv / NormV; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << "The norm of velocity increment is: " << NormDv << std::endl; std::cout << "The norm of velocity is: " << NormV << std::endl; std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl; } if (errorNormDv < mVelocityTolerance) { std::cout << "The norm of velocity is: " << NormV << " The norm of velocity increment is: " << NormDv << " Velocity error: " << errorNormDv << " Converged!" << std::endl; return true; } else { std::cout << "The norm of velocity is: " << NormV << " The norm of velocity increment is: " << NormDv << " Velocity error: " << errorNormDv << " Not converged!" 
<< std::endl; return false; } } void CheckVelocityConvergence(const double NormOldV) { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); double NormV = 0.00; #pragma omp parallel for reduction(+ \ : NormV) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < 3; ++d) { NormV += r_vel[d] * r_vel[d]; } } NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); std::cout << "The norm of velocity is: " << NormV << " Old velocity norm was: " << NormOldV << std::endl; } bool CheckPressureIncrementConvergence(const double NormDp) { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); double NormP = 0.00; double errorNormDp = 0; #pragma omp parallel for reduction(+ \ : NormP) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const double Pr = it_node->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } NormP = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); const double zero_tol = 1.0e-12; errorNormDp = (NormP < zero_tol) ? NormDp : NormDp / NormP; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << " The norm of pressure increment is: " << NormDp << std::endl; std::cout << " The norm of pressure is: " << NormP << std::endl; std::cout << " The norm of pressure increment is: " << NormDp << " Pressure error: " << errorNormDp << std::endl; } if (errorNormDp < mPressureTolerance) { std::cout << " The norm of pressure is: " << NormP << " The norm of pressure increment is: " << NormDp << " Pressure error: " << errorNormDp << " Converged!" << std::endl; return true; } else { std::cout << " The norm of pressure is: " << NormP << " The norm of pressure increment is: " << NormDp << " Pressure error: " << errorNormDp << " Not converged!" << std::endl; return false; } } void FixPressure() { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; // if (it_node->Is(RIGID) && (it_node->X() < 0.001 || it_node->X() > 0.999)) // for closed domain case with analytical solution if (it_node->Is(FREE_SURFACE)) { it_node->FastGetSolutionStepValue(PRESSURE) = 0; it_node->Fix(PRESSURE); } } } void FreePressure() { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; it_node->Free(PRESSURE); } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ double mVelocityTolerance; double mPressureTolerance; unsigned int mMaxPressureIter; unsigned int mDomainSize; unsigned int mTimeOrder; bool mReformDofSet; // Fractional step index. 
/* 1 : Momentum step (calculate fractional step velocity)
 * 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
 * 4 : Pressure step
 * 5 : Computation of projections
 * 6 : End of step velocity
 */
// unsigned int mStepId;

/// Strategy for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;

/// Strategy for the solution of the mass (continuity) equation
StrategyPointerType mpPressureStrategy;

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

/// Assignment operator. Declared without a definition to forbid copying
/// (the previous empty body was ill-formed: a non-void function with no return).
ThreeStepVPStrategy &operator=(ThreeStepVPStrategy const &rOther);

/// Copy constructor. Declared without a definition to forbid copying.
ThreeStepVPStrategy(ThreeStepVPStrategy const &rOther);

///@}

}; /// Class ThreeStepVPStrategy

///@}

///@name Type Definitions
///@{

///@}

///@} // addtogroup

} // namespace Kratos.

#endif // KRATOS_THREE_STEP_V_P_STRATEGY_H
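A verification note on SetTimeCoefficients above (not part of the original header): the constant-step limit of the variable-step BDF2 coefficients can be checked by hand. With the quantities used in the code,

\rho = \frac{\Delta t_{\mathrm{old}}}{\Delta t}, \qquad
T = \frac{1}{\Delta t\,\rho^2 + \Delta t\,\rho} = \frac{1}{\Delta t\,\rho(\rho + 1)},

\qquad
c_0 = T\,(\rho^2 + 2\rho), \qquad
c_1 = -T\,(\rho^2 + 2\rho + 1), \qquad
c_2 = T.

For a constant time step, \rho = 1 and T = \tfrac{1}{2\Delta t}, so

c_0 = \frac{3}{2\Delta t}, \qquad
c_1 = -\frac{4}{2\Delta t}, \qquad
c_2 = \frac{1}{2\Delta t},

which matches the "3/2Dt", "-4/2Dt", "1/2Dt" inline comments and the standard second-order backward-difference stencil

\dot{y}^{\,n+1} \approx \frac{3\,y^{n+1} - 4\,y^{n} + y^{n-1}}{2\,\Delta t}.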
executer.c
#include <stdlib.h>
#include <stdio.h>

#include "executer.h"

//TODO: error string
ast_node_t *simplify_tree(ast_node_t *tree) {
    if (tree == NULL) {
        return NULL;
    }
    if (tree->type == NODE_OPERATOR) {
        ast_node_t *first_child = simplify_tree(tree->children[0]);
        if (first_child == NULL) {
            return NULL;
        }
        // Initialize the accumulator: it was previously read uninitialized
        // whenever the first child was not a plain value.
        double accum = 0.0;
        if (first_child->type == NODE_VALUE) { //TODO: make number_t
            accum = first_child->data.value.real;
            free(first_child);
        }
        //#pragma omp parallel for
        for (int i = 1; i < tree->numChildren; i++) {
            ast_node_t *child = simplify_tree(tree->children[i]);
            if (child == NULL) {
                return NULL;
            }
            if (child->type == NODE_VALUE) {
                switch (tree->data.op) {
                case ADD: accum += child->data.value.real; break;
                case SUB: accum -= child->data.value.real; break;
                case MUL: accum *= child->data.value.real; break;
                case DIV: accum /= child->data.value.real; break; //TODO: guard against division by zero
                default: break;
                }
                free(child);
            }
            //TODO: variables (non-value children are currently dropped, which leaks them)
        }
        tree->numChildren = 0;
        tree->type = NODE_VALUE;
        tree->data.value = (number_t){accum};
    }
    return tree;
}
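A hypothetical usage sketch (not part of the original file): it builds the tree for 1 + 2 + 3 and folds it with simplify_tree. The exact ast_node_t layout lives in executer.h, which is not shown here; the sketch assumes the fields used above (type, data.op, data.value.real, and children as a heap-allocated ast_node_t ** with numChildren entries), and the mk_* helpers are illustrative only, not part of the original API.

#include <stdio.h>
#include <stdlib.h>
#include "executer.h"

/* Illustrative helper: heap-allocate a value leaf. */
static ast_node_t *mk_value(double x) {
    ast_node_t *n = calloc(1, sizeof *n);
    n->type = NODE_VALUE;
    n->data.value = (number_t){x};
    return n;
}

/* Illustrative helper: an ADD node with three children. */
static ast_node_t *mk_add3(ast_node_t *a, ast_node_t *b, ast_node_t *c) {
    ast_node_t *n = calloc(1, sizeof *n);
    n->type = NODE_OPERATOR;
    n->data.op = ADD;
    n->numChildren = 3;
    n->children = malloc(3 * sizeof *n->children);  /* assumes ast_node_t **children */
    n->children[0] = a;
    n->children[1] = b;
    n->children[2] = c;
    return n;
}

int main(void) {
    ast_node_t *tree = mk_add3(mk_value(1), mk_value(2), mk_value(3));
    tree = simplify_tree(tree);             /* frees the folded value children */
    printf("%g\n", tree->data.value.real);  /* expected: 6 */
    free(tree->children);                   /* the children array itself is not freed by simplify_tree */
    free(tree);
    return 0;
}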
GB_binop__ne_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ne_int16) // A.*B function (eWiseMult): GB (_AemultB_08__ne_int16) // A.*B function (eWiseMult): GB (_AemultB_02__ne_int16) // A.*B function (eWiseMult): GB (_AemultB_04__ne_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int16) // A*D function (colscale): GB (_AxD__ne_int16) // D*A function (rowscale): GB (_DxB__ne_int16) // C+=B function (dense accum): GB (_Cdense_accumB__ne_int16) // C+=b function (dense accum): GB (_Cdense_accumb__ne_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int16) // C=scalar+B GB (_bind1st__ne_int16) // C=scalar+B' GB (_bind1st_tran__ne_int16) // C=A+scalar GB (_bind2nd__ne_int16) // C=A'+scalar GB (_bind2nd_tran__ne_int16) // C type: bool // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_INT16 || GxB_NO_NE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ne_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ne_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ne_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ne_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ne_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ne_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ne_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ne_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ne_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ne_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ne_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ne_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__ne_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__ne_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
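For orientation, once the GBB and GBX macros are resolved for a non-iso input, the generated GB (_bind1st__ne_int16) kernel above reduces to the following standalone pattern. This is a sketch for illustration only; the function name and the NULL-bitmap convention are assumptions, not GraphBLAS API.

#include <stdbool.h>
#include <stdint.h>

/* Cx [p] = (x != Bx [p]) for every entry present in B. Bb is the optional
   bitmap (NULL for a full matrix), mirroring what GBB and GBX expand to
   for a non-iso input in the kernel above. */
static void ne_int16_bind1st (bool *Cx, int16_t x, const int16_t *Bx,
    const int8_t *restrict Bb, int64_t bnz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   /* entry p not present in B */
        Cx [p] = (x != Bx [p]) ;
    }
}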
network_simplex_simple.h
/* -*- mode: C++; indent-tabs-mode: nil; -*- * * * This file has been adapted by Nicolas Bonneel (2013), * from network_simplex.h from LEMON, a generic C++ optimization library, * to implement a lightweight network simplex for mass transport, more * memory efficient than the original file. A previous version of this file * is used as part of the Displacement Interpolation project, * Web: http://www.cs.ubc.ca/labs/imager/tr/2011/DisplacementInterpolation/ * * Revisions: * March 2015: added OpenMP parallelization * March 2017: included Antoine Rolet's trick to make it more robust * April 2018: IMPORTANT bug fix + uses 64bit integers (slightly slower but less risks of overflows), updated to a newer version of the algo by LEMON, sparse flow by default + minor edits. * * **** Original file Copyright Notice : * * Copyright (C) 2003-2010 * Egervary Jeno Kombinatorikus Optimalizalasi Kutatocsoport * (Egervary Research Group on Combinatorial Optimization, EGRES). * * Permission to use, modify and distribute this software is granted * provided that this copyright notice appears in all copies. For * precise terms see the accompanying LICENSE file. * * This software is provided "AS IS" with no warranty of any kind, * express or implied, and with no claim as to its suitability for any * purpose. * */ #ifndef LEMON_NETWORK_SIMPLEX_SIMPLE_H #define LEMON_NETWORK_SIMPLEX_SIMPLE_H /// \ingroup min_cost_flow_algs /// /// \file /// \brief Network Simplex algorithm for finding a minimum cost flow. // if your compiler has troubles with unorderedmaps, just comment the following line to use a slower std::map instead #define HASHMAP // now handled with unorderedmaps instead of stdext::hash_map. Should be better supported. #define SPARSE_FLOW // a sparse flow vector will be 10-15% slower for small problems but uses less memory and becomes faster for large problems (40k total nodes) #include <vector> #include <limits> #include <algorithm> #ifdef HASHMAP #include <unordered_map> #else #include <map> #endif #ifdef _OPENMP # include <omp.h> #endif #include <cmath> #include "full_bipartitegraph.h" #define INVALIDNODE -1 #define INVALID (-1) namespace lemon { template <typename T> class ProxyObject; template<typename T> class SparseValueVector { public: SparseValueVector(size_t n = 0) // parameter n for compatibility with standard vectors { } void resize(size_t n = 0) {}; T operator[](const size_t id) const { #ifdef HASHMAP typename std::unordered_map<size_t, T>::const_iterator it = data.find(id); #else typename std::map<size_t, T>::const_iterator it = data.find(id); #endif if (it == data.end()) return 0; else return it->second; } ProxyObject<T> operator[](const size_t id) { return ProxyObject<T>(this, id); } //private: #ifdef HASHMAP std::unordered_map<size_t, T> data; #else std::map<size_t, T> data; #endif }; template <typename T> class ProxyObject { public: ProxyObject(SparseValueVector<T> *v, size_t idx) { _v = v; _idx = idx; }; ProxyObject<T> & operator=(const T &v) { // If we get here, we know that operator[] was called to perform a write access, // so we can insert an item in the vector if needed if (v != 0) _v->data[_idx] = v; return *this; } operator T() { // If we get here, we know that operator[] was called to perform a read access, // so we can simply return the existing object #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) return 0; else return it->second; } void 
operator+=(T val) { if (val == 0) return; #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) _v->data[_idx] = val; else { T sum = it->second + val; if (sum == 0) _v->data.erase(it); else it->second = sum; } } void operator-=(T val) { if (val == 0) return; #ifdef HASHMAP typename std::unordered_map<size_t, T>::iterator it = _v->data.find(_idx); #else typename std::map<size_t, T>::iterator it = _v->data.find(_idx); #endif if (it == _v->data.end()) _v->data[_idx] = -val; else { T sum = it->second - val; if (sum == 0) _v->data.erase(it); else it->second = sum; } } SparseValueVector<T> *_v; size_t _idx; }; /// \addtogroup min_cost_flow_algs /// @{ /// \brief Implementation of the primal Network Simplex algorithm /// for finding a \ref min_cost_flow "minimum cost flow". /// /// \ref NetworkSimplexSimple implements the primal Network Simplex algorithm /// for finding a \ref min_cost_flow "minimum cost flow" /// \ref amo93networkflows, \ref dantzig63linearprog, /// \ref kellyoneill91netsimplex. /// This algorithm is a highly efficient specialized version of the /// linear programming simplex method directly for the minimum cost /// flow problem. /// /// In general, %NetworkSimplexSimple is the fastest implementation available /// in LEMON for this problem. /// Moreover, it supports both directions of the supply/demand inequality /// constraints. For more information, see \ref SupplyType. /// /// Most of the parameters of the problem (except for the digraph) /// can be given using separate functions, and the algorithm can be /// executed using the \ref run() function. If some parameters are not /// specified, then default values will be used. /// /// \tparam GR The digraph type the algorithm runs on. /// \tparam V The number type used for flow amounts, capacity bounds /// and supply values in the algorithm. By default, it is \c int. /// \tparam C The number type used for costs and potentials in the /// algorithm. By default, it is the same as \c V. /// /// \warning Both number types must be signed and all input data must /// be integer. /// /// \note %NetworkSimplexSimple provides five different pivot rule /// implementations, from which the most efficient one is used /// by default. For more information, see \ref PivotRule. template <typename GR, typename V = int, typename C = V, typename ArcsType = int64_t> class NetworkSimplexSimple { public: /// \brief Constructor. /// /// The constructor of the class. /// /// \param graph The digraph the algorithm runs on. /// \param arc_mixing Indicate if the arcs have to be stored in a /// mixed order in the internal data structure. /// In special cases, it could lead to better overall performance, /// but it is usually slower. Therefore it is disabled by default. NetworkSimplexSimple(const GR& graph, bool arc_mixing, int nbnodes, ArcsType nb_arcs, size_t maxiters = 0) : _graph(graph), _arc_mixing(arc_mixing), MAX(std::numeric_limits<Value>::max()), INF(std::numeric_limits<Value>::has_infinity ? std::numeric_limits<Value>::infinity() : MAX), //_arc_id(graph), _init_nb_nodes(nbnodes), _init_nb_arcs(nb_arcs) { // Reset data structures reset(); max_iter = maxiters; } /// The type of the flow amounts, capacity bounds and supply values typedef V Value; /// The type of the arc costs typedef C Cost; public: /// \brief Problem type constants for the \c run() function. 
/// /// Enum type containing the problem type constants that can be /// returned by the \ref run() function of the algorithm. enum ProblemType { /// The problem has no feasible solution (flow). INFEASIBLE, /// The problem has optimal solution (i.e. it is feasible and /// bounded), and the algorithm has found optimal flow and node /// potentials (primal and dual solutions). OPTIMAL, /// The objective function of the problem is unbounded, i.e. /// there is a directed cycle having negative total cost and /// infinite upper bound. UNBOUNDED }; /// \brief Constants for selecting the type of the supply constraints. /// /// Enum type containing constants for selecting the supply type, /// i.e. the direction of the inequalities in the supply/demand /// constraints of the \ref min_cost_flow "minimum cost flow problem". /// /// The default supply type is \c GEQ, the \c LEQ type can be /// selected using \ref supplyType(). /// The equality form is a special case of both supply types. enum SupplyType { /// This option means that there are <em>"greater or equal"</em> /// supply/demand constraints in the definition of the problem. GEQ, /// This option means that there are <em>"less or equal"</em> /// supply/demand constraints in the definition of the problem. LEQ }; private: size_t max_iter; TEMPLATE_DIGRAPH_TYPEDEFS(GR) typedef std::vector<int> IntVector; typedef std::vector<ArcsType> ArcVector; typedef std::vector<Value> ValueVector; typedef std::vector<Cost> CostVector; typedef std::vector<char> BoolVector; // Note: vector<char> is used instead of vector<bool> for efficiency reasons // State constants for arcs enum ArcState { STATE_UPPER = -1, STATE_TREE = 0, STATE_LOWER = 1 }; typedef std::vector<signed char> StateVector; // Note: vector<signed char> is used instead of vector<ArcState> for // efficiency reasons private: // Data related to the underlying digraph const GR &_graph; int _node_num; ArcsType _arc_num; ArcsType _all_arc_num; ArcsType _search_arc_num; // Parameters of the problem SupplyType _stype; Value _sum_supply; inline int _node_id(int n) const { return _node_num - n - 1; }; IntVector _source; // keep nodes as integers IntVector _target; bool _arc_mixing; // Node and arc data CostVector _cost; ValueVector _supply; #ifdef SPARSE_FLOW SparseValueVector<Value> _flow; #else ValueVector _flow; #endif CostVector _pi; // Data for storing the spanning tree structure IntVector _parent; ArcVector _pred; IntVector _thread; IntVector _rev_thread; IntVector _succ_num; IntVector _last_succ; IntVector _dirty_revs; BoolVector _forward; StateVector _state; ArcsType _root; // Temporary data used in the current pivot iteration ArcsType in_arc, join, u_in, v_in, u_out, v_out; ArcsType first, second, right, last; ArcsType stem, par_stem, new_stem; Value delta; const Value MAX; ArcsType mixingCoeff; public: /// \brief Constant for infinite upper bounds (capacities). /// /// Constant for infinite upper bounds (capacities). /// It is \c std::numeric_limits<Value>::infinity() if available, /// \c std::numeric_limits<Value>::max() otherwise. const Value INF; private: // thank you to DVK and MizardX from StackOverflow for this function! 
inline ArcsType sequence(ArcsType k) const { ArcsType smallv = (k > num_total_big_subsequence_numbers) & 1; k -= num_total_big_subsequence_numbers * smallv; ArcsType subsequence_length2 = subsequence_length - smallv; ArcsType subsequence_num = (k / subsequence_length2) + num_big_subsequences * smallv; ArcsType subsequence_offset = (k % subsequence_length2) * mixingCoeff; return subsequence_offset + subsequence_num; } ArcsType subsequence_length; ArcsType num_big_subsequences; ArcsType num_total_big_subsequence_numbers; inline ArcsType getArcID(const Arc &arc) const { //int n = _arc_num-arc._id-1; ArcsType n = _arc_num - GR::id(arc) - 1; //ArcsType a = mixingCoeff*(n%mixingCoeff) + n/mixingCoeff; //ArcsType b = _arc_id[arc]; if (_arc_mixing) return sequence(n); else return n; } // finally unused because too slow inline ArcsType getSource(const ArcsType arc) const { //ArcsType a = _source[arc]; //return a; ArcsType n = _arc_num - arc - 1; if (_arc_mixing) n = mixingCoeff*(n%mixingCoeff) + n / mixingCoeff; ArcsType b; if (n >= 0) b = _node_id(_graph.source(GR::arcFromId(n))); else { n = arc + 1 - _arc_num; if (n <= _node_num) b = _node_num; else if (n >= _graph._n1) b = _graph._n1; else b = _graph._n1 - n; } return b; } // Implementation of the Block Search pivot rule class BlockSearchPivotRule { private: // References to the NetworkSimplexSimple class const IntVector &_source; const IntVector &_target; const CostVector &_cost; const StateVector &_state; const CostVector &_pi; ArcsType &_in_arc; ArcsType _search_arc_num; // Pivot rule data ArcsType _block_size; ArcsType _next_arc; NetworkSimplexSimple &_ns; public: // Constructor BlockSearchPivotRule(NetworkSimplexSimple &ns) : _source(ns._source), _target(ns._target), _cost(ns._cost), _state(ns._state), _pi(ns._pi), _in_arc(ns.in_arc), _search_arc_num(ns._search_arc_num), _next_arc(0), _ns(ns) { // The main parameters of the pivot rule const double BLOCK_SIZE_FACTOR = 1; const ArcsType MIN_BLOCK_SIZE = 10; _block_size = std::max(ArcsType(BLOCK_SIZE_FACTOR * std::sqrt(double(_search_arc_num))), MIN_BLOCK_SIZE); } // Find next entering arc bool findEnteringArc() { Cost min_val = 0; ArcsType N = 1; #ifdef _OPENMP N = omp_get_max_threads(); #endif std::vector<Cost> minArray(N, 0); std::vector<ArcsType> arcId(N); #ifdef _OPENMP ArcsType bs = (ArcsType)ceil(_block_size / (double)N); #endif for (ArcsType i = 0; i < _search_arc_num; i += _block_size) { ArcsType e; ArcsType j; #pragma omp parallel { int t = 0; #ifdef _OPENMP t = omp_get_thread_num(); #endif #pragma omp for schedule(static, bs) lastprivate(e) for (j = 0; j < std::min(i + _block_size, _search_arc_num) - i; j++) { e = (_next_arc + i + j); if (e >= _search_arc_num) e -= _search_arc_num; Cost c = _state[e] * (_cost[e] + _pi[_source[e]] - _pi[_target[e]]); if (c < minArray[t]) { minArray[t] = c; arcId[t] = e; } } } for (int j = 0; j < N; j++) { if (minArray[j] < min_val) { min_val = minArray[j]; _in_arc = arcId[j]; } } Cost a = std::abs(_pi[_source[_in_arc]]) > std::abs(_pi[_target[_in_arc]]) ? std::abs(_pi[_source[_in_arc]]) : std::abs(_pi[_target[_in_arc]]); a = a > std::abs(_cost[_in_arc]) ? a : std::abs(_cost[_in_arc]); if (min_val < -std::numeric_limits<Cost>::epsilon()*a) { _next_arc = e; return true; } } Cost a = fabs(_pi[_source[_in_arc]]) > fabs(_pi[_target[_in_arc]]) ? fabs(_pi[_source[_in_arc]]) : fabs(_pi[_target[_in_arc]]); a = a > fabs(_cost[_in_arc]) ? 
a : fabs(_cost[_in_arc]); if (min_val >= -std::numeric_limits<Cost>::epsilon()*a) return false; return true; } }; //class BlockSearchPivotRule public: int _init_nb_nodes; ArcsType _init_nb_arcs; /// \name Parameters /// The parameters of the algorithm can be specified using these /// functions. /// @{ /// \brief Set the costs of the arcs. /// /// This function sets the costs of the arcs. /// If it is not used before calling \ref run(), the costs /// will be set to \c 1 on all arcs. /// /// \param map An arc map storing the costs. /// Its \c Value type must be convertible to the \c Cost type /// of the algorithm. /// /// \return <tt>(*this)</tt> template<typename CostMap> NetworkSimplexSimple& costMap(const CostMap& map) { Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a)) { _cost[getArcID(a)] = map[a]; } return *this; } /// \brief Set the costs of one arc. /// /// This function sets the costs of one arcs. /// Done for memory reasons /// /// \param arc An arc. /// \param arc A cost /// /// \return <tt>(*this)</tt> template<typename Value> NetworkSimplexSimple& setCost(const Arc& arc, const Value cost) { _cost[getArcID(arc)] = cost; return *this; } /// \brief Set the supply values of the nodes. /// /// This function sets the supply values of the nodes. /// If neither this function nor \ref stSupply() is used before /// calling \ref run(), the supply of each node will be set to zero. /// /// \param map A node map storing the supply values. /// Its \c Value type must be convertible to the \c Value type /// of the algorithm. /// /// \return <tt>(*this)</tt> template<typename SupplyMap> NetworkSimplexSimple& supplyMap(const SupplyMap& map) { Node n; _graph.first(n); for (; n != INVALIDNODE; _graph.next(n)) { _supply[_node_id(n)] = map[n]; } return *this; } template<typename SupplyMap> NetworkSimplexSimple& supplyMap(const SupplyMap* map1, int n1, const SupplyMap* map2, int n2) { Node n; _graph.first(n); for (; n != INVALIDNODE; _graph.next(n)) { if (n<n1) _supply[_node_id(n)] = map1[n]; else _supply[_node_id(n)] = map2[n - n1]; } return *this; } template<typename SupplyMap> NetworkSimplexSimple& supplyMapAll(SupplyMap val1, int n1, SupplyMap val2, int n2) { Node n; _graph.first(n); for (; n != INVALIDNODE; _graph.next(n)) { if (n<n1) _supply[_node_id(n)] = val1; else _supply[_node_id(n)] = val2; } return *this; } /// \brief Set single source and target nodes and a supply value. /// /// This function sets a single source node and a single target node /// and the required flow value. /// If neither this function nor \ref supplyMap() is used before /// calling \ref run(), the supply of each node will be set to zero. /// /// Using this function has the same effect as using \ref supplyMap() /// with such a map in which \c k is assigned to \c s, \c -k is /// assigned to \c t and all other nodes have zero supply value. /// /// \param s The source node. /// \param t The target node. /// \param k The required amount of flow from node \c s to node \c t /// (i.e. the supply of \c s and the demand of \c t). /// /// \return <tt>(*this)</tt> NetworkSimplexSimple& stSupply(const Node& s, const Node& t, Value k) { for (int i = 0; i != _node_num; ++i) { _supply[i] = 0; } _supply[_node_id(s)] = k; _supply[_node_id(t)] = -k; return *this; } /// \brief Set the type of the supply constraints. /// /// This function sets the type of the supply/demand constraints. /// If it is not used before calling \ref run(), the \ref GEQ supply /// type will be used. /// /// For more information, see \ref SupplyType. 
    ///
    /// \return <tt>(*this)</tt>
    NetworkSimplexSimple& supplyType(SupplyType supply_type) {
        _stype = supply_type;
        return *this;
    }

    /// @}

    /// \name Execution Control
    /// The algorithm can be executed using \ref run().

    /// @{

    /// \brief Run the algorithm.
    ///
    /// This function runs the algorithm.
    /// The parameters can be specified using functions \ref lowerMap(),
    /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
    /// \ref supplyType().
    /// For example,
    /// \code
    /// NetworkSimplexSimple<ListDigraph> ns(graph);
    /// ns.lowerMap(lower).upperMap(upper).costMap(cost)
    /// .supplyMap(sup).run();
    /// \endcode
    ///
    /// This function can be called more than once. All the given parameters
    /// are kept for the next call, unless \ref resetParams() or \ref reset()
    /// is used, thus only the modified parameters have to be set again.
    /// If the underlying digraph was also modified after the construction
    /// of the class (or the last \ref reset() call), then the \ref reset()
    /// function must be called.
    ///
    /// \return \c INFEASIBLE if no feasible flow exists,
    /// \n \c OPTIMAL if the problem has an optimal solution
    /// (i.e. it is feasible and bounded), and the algorithm has found
    /// optimal flow and node potentials (primal and dual solutions),
    /// \n \c UNBOUNDED if the objective function of the problem is
    /// unbounded, i.e. there is a directed cycle having negative total
    /// cost and infinite upper bound.
    ///
    /// \see ProblemType, PivotRule
    /// \see resetParams(), reset()
    ProblemType run() {
        if (!init()) return INFEASIBLE;
        return start();
    }

    /// \brief Reset all the parameters that have been given before.
    ///
    /// This function resets all the parameters that have been given
    /// before using functions \ref lowerMap(), \ref upperMap(),
    /// \ref costMap(), \ref supplyMap(), \ref stSupply(), \ref supplyType().
    ///
    /// It is useful for multiple \ref run() calls. Basically, all the given
    /// parameters are kept for the next \ref run() call, unless
    /// \ref resetParams() or \ref reset() is used.
    /// If the underlying digraph was also modified after the construction
    /// of the class or the last \ref reset() call, then the \ref reset()
    /// function must be used, otherwise \ref resetParams() is sufficient.
    ///
    /// For example,
    /// \code
    /// NetworkSimplexSimple<ListDigraph> ns(graph);
    ///
    /// // First run
    /// ns.lowerMap(lower).upperMap(upper).costMap(cost)
    /// .supplyMap(sup).run();
    ///
    /// // Run again with modified cost map (resetParams() is not called,
    /// // so only the cost map has to be set again)
    /// cost[e] += 100;
    /// ns.costMap(cost).run();
    ///
    /// // Run again from scratch using resetParams()
    /// // (the lower bounds will be set to zero on all arcs)
    /// ns.resetParams();
    /// ns.upperMap(capacity).costMap(cost)
    /// .supplyMap(sup).run();
    /// \endcode
    ///
    /// \return <tt>(*this)</tt>
    ///
    /// \see reset(), run()
    NetworkSimplexSimple& resetParams() {
        for (int i = 0; i != _node_num; ++i) {
            _supply[i] = 0;
        }
        for (ArcsType i = 0; i != _arc_num; ++i) {
            _cost[i] = 1;
        }
        _stype = GEQ;
        return *this;
    }

    /// \brief Reset the internal data structures and all the parameters
    /// that have been given before.
    ///
    /// This function resets the internal data structures and all the
    /// parameters that have been given before using functions \ref lowerMap(),
    /// \ref upperMap(), \ref costMap(), \ref supplyMap(), \ref stSupply(),
    /// \ref supplyType().
/// /// It is useful for multiple \ref run() calls. Basically, all the given /// parameters are kept for the next \ref run() call, unless /// \ref resetParams() or \ref reset() is used. /// If the underlying digraph was also modified after the construction /// of the class or the last \ref reset() call, then the \ref reset() /// function must be used, otherwise \ref resetParams() is sufficient. /// /// See \ref resetParams() for examples. /// /// \return <tt>(*this)</tt> /// /// \see resetParams(), run() NetworkSimplexSimple& reset() { // Resize vectors _node_num = _init_nb_nodes; _arc_num = _init_nb_arcs; int all_node_num = _node_num + 1; ArcsType max_arc_num = _arc_num + 2 * _node_num; _source.resize(max_arc_num); _target.resize(max_arc_num); _cost.resize(max_arc_num); _supply.resize(all_node_num); _flow.resize(max_arc_num); _pi.resize(all_node_num); _parent.resize(all_node_num); _pred.resize(all_node_num); _forward.resize(all_node_num); _thread.resize(all_node_num); _rev_thread.resize(all_node_num); _succ_num.resize(all_node_num); _last_succ.resize(all_node_num); _state.resize(max_arc_num); //_arc_mixing=false; if (_arc_mixing && _node_num > 1) { // Store the arcs in a mixed order //ArcsType k = std::max(ArcsType(std::sqrt(double(_arc_num))), ArcsType(10)); const ArcsType k = std::max(ArcsType(_arc_num / _node_num), ArcsType(3)); mixingCoeff = k; subsequence_length = _arc_num / mixingCoeff + 1; num_big_subsequences = _arc_num % mixingCoeff; num_total_big_subsequence_numbers = subsequence_length * num_big_subsequences; #pragma omp parallel for schedule(static) for (Arc a = 0; a <= _graph.maxArcId(); a++) { // --a <=> _graph.next(a) , -1 == INVALID ArcsType i = sequence(_graph.maxArcId()-a); _source[i] = _node_id(_graph.source(a)); _target[i] = _node_id(_graph.target(a)); } } else { // Store the arcs in the original order ArcsType i = 0; Arc a; _graph.first(a); for (; a != INVALID; _graph.next(a), ++i) { _source[i] = _node_id(_graph.source(a)); _target[i] = _node_id(_graph.target(a)); //_arc_id[a] = i; } } // Reset parameters resetParams(); return *this; } /// @} /// \name Query Functions /// The results of the algorithm can be obtained using these /// functions.\n /// The \ref run() function must be called before using them. /// @{ /// \brief Return the total cost of the found flow. /// /// This function returns the total cost of the found flow. /// Its complexity is O(e). /// /// \note The return type of the function can be specified as a /// template parameter. For example, /// \code /// ns.totalCost<double>(); /// \endcode /// It is useful if the total cost cannot be stored in the \c Cost /// type of the algorithm, which is the default return type of the /// function. /// /// \pre \ref run() must be called before using this function. /*template <typename Number> Number totalCost() const { Number c = 0; for (ArcIt a(_graph); a != INVALID; ++a) { int i = getArcID(a); c += Number(_flow[i]) * Number(_cost[i]); } return c; }*/ template <typename Number> Number totalCost() const { Number c = 0; #ifdef SPARSE_FLOW #ifdef HASHMAP typename std::unordered_map<size_t, Value>::const_iterator it; #else typename std::map<size_t, Value>::const_iterator it; #endif for (it = _flow.data.begin(); it!=_flow.data.end(); ++it) c += Number(it->second) * Number(_cost[it->first]); return c; #else for (ArcsType i = 0; i<_flow.size(); i++) c += _flow[i] * Number(_cost[i]); return c; #endif } #ifndef DOXYGEN Cost totalCost() const { return totalCost<Cost>(); } #endif /// \brief Return the flow on the given arc. 
    ///
    /// This function returns the flow on the given arc.
    ///
    /// \pre \ref run() must be called before using this function.
    Value flow(const Arc& a) const {
        return _flow[getArcID(a)];
    }

    /// \brief Return the flow map (the primal solution).
    ///
    /// This function copies the flow value on each arc into the given
    /// map. The \c Value type of the algorithm must be convertible to
    /// the \c Value type of the map.
    ///
    /// \pre \ref run() must be called before using this function.
    template <typename FlowMap>
    void flowMap(FlowMap &map) const {
        Arc a;
        _graph.first(a);
        for (; a != INVALID; _graph.next(a)) {
            map.set(a, _flow[getArcID(a)]);
        }
    }

    /// \brief Return the potential (dual value) of the given node.
    ///
    /// This function returns the potential (dual value) of the
    /// given node.
    ///
    /// \pre \ref run() must be called before using this function.
    Cost potential(const Node& n) const {
        return _pi[_node_id(n)];
    }

    /// \brief Return the potential map (the dual solution).
    ///
    /// This function copies the potential (dual value) of each node
    /// into the given map.
    /// The \c Cost type of the algorithm must be convertible to the
    /// \c Value type of the map.
    ///
    /// \pre \ref run() must be called before using this function.
    template <typename PotentialMap>
    void potentialMap(PotentialMap &map) const {
        Node n;
        _graph.first(n);
        for (; n != INVALID; _graph.next(n)) {
            map.set(n, _pi[_node_id(n)]);
        }
    }

    /// @}

private:

    // Initialize internal data structures
    bool init() {
        if (_node_num == 0) return false;

        // Check the sum of supply values
        _sum_supply = 0;
        for (int i = 0; i != _node_num; ++i) {
            _sum_supply += _supply[i];
        }

        // Initialize artificial cost
        Cost ART_COST;
        if (std::numeric_limits<Cost>::is_exact) {
            ART_COST = std::numeric_limits<Cost>::max() / 2 + 1;
        }
        else {
            ART_COST = 0;
            for (ArcsType i = 0; i != _arc_num; ++i) {
                if (_cost[i] > ART_COST) ART_COST = _cost[i];
            }
            ART_COST = (ART_COST + 1) * _node_num;
        }

        // Initialize arc maps
        for (ArcsType i = 0; i != _arc_num; ++i) {
#ifndef SPARSE_FLOW
            _flow[i] = 0; //by default, the sparse matrix is empty
#endif
            _state[i] = STATE_LOWER;
        }
#ifdef SPARSE_FLOW
        _flow = SparseValueVector<Value>();
#endif

        // Set data for the artificial root node
        _root = _node_num;
        _parent[_root] = -1;
        _pred[_root] = -1;
        _thread[_root] = 0;
        _rev_thread[0] = _root;
        _succ_num[_root] = _node_num + 1;
        _last_succ[_root] = _root - 1;
        _supply[_root] = -_sum_supply;
        _pi[_root] = 0;

        // Add artificial arcs and initialize the spanning tree data structure
        if (_sum_supply == 0) {
            // EQ supply constraints
            _search_arc_num = _arc_num;
            _all_arc_num = _arc_num + _node_num;
            for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
                _parent[u] = _root;
                _pred[u] = e;
                _thread[u] = u + 1;
                _rev_thread[u + 1] = u;
                _succ_num[u] = 1;
                _last_succ[u] = u;
                _state[e] = STATE_TREE;
                if (_supply[u] >= 0) {
                    _forward[u] = true;
                    _pi[u] = 0;
                    _source[e] = u;
                    _target[e] = _root;
                    _flow[e] = _supply[u];
                    _cost[e] = 0;
                }
                else {
                    _forward[u] = false;
                    _pi[u] = ART_COST;
                    _source[e] = _root;
                    _target[e] = u;
                    _flow[e] = -_supply[u];
                    _cost[e] = ART_COST;
                }
            }
        }
        else if (_sum_supply > 0) {
            // LEQ supply constraints
            _search_arc_num = _arc_num + _node_num;
            ArcsType f = _arc_num + _node_num;
            for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
                _parent[u] = _root;
                _thread[u] = u + 1;
                _rev_thread[u + 1] = u;
                _succ_num[u] = 1;
                _last_succ[u] = u;
                if (_supply[u] >= 0) {
                    _forward[u] = true;
                    _pi[u] = 0;
                    _pred[u] = e;
                    _source[e] = u;
                    _target[e] = _root;
                    _flow[e] = _supply[u];
                    _cost[e] = 0;
                    _state[e] = STATE_TREE;
                }
                else {
                    _forward[u] = false;
                    _pi[u] = ART_COST;
                    _pred[u] = f;
                    _source[f] = _root;
                    _target[f] = u;
                    _flow[f] = -_supply[u];
                    _cost[f] = ART_COST;
                    _state[f] = STATE_TREE;
                    _source[e] = u;
                    _target[e] = _root;
                    //_flow[e] = 0;  //by default, the sparse matrix is empty
                    _cost[e] = 0;
                    _state[e] = STATE_LOWER;
                    ++f;
                }
            }
            _all_arc_num = f;
        }
        else {
            // GEQ supply constraints
            _search_arc_num = _arc_num + _node_num;
            ArcsType f = _arc_num + _node_num;
            for (ArcsType u = 0, e = _arc_num; u != _node_num; ++u, ++e) {
                _parent[u] = _root;
                _thread[u] = u + 1;
                _rev_thread[u + 1] = u;
                _succ_num[u] = 1;
                _last_succ[u] = u;
                if (_supply[u] <= 0) {
                    _forward[u] = false;
                    _pi[u] = 0;
                    _pred[u] = e;
                    _source[e] = _root;
                    _target[e] = u;
                    _flow[e] = -_supply[u];
                    _cost[e] = 0;
                    _state[e] = STATE_TREE;
                }
                else {
                    _forward[u] = true;
                    _pi[u] = -ART_COST;
                    _pred[u] = f;
                    _source[f] = u;
                    _target[f] = _root;
                    _flow[f] = _supply[u];
                    _state[f] = STATE_TREE;
                    _cost[f] = ART_COST;
                    _source[e] = _root;
                    _target[e] = u;
                    //_flow[e] = 0;  //by default, the sparse matrix is empty
                    _cost[e] = 0;
                    _state[e] = STATE_LOWER;
                    ++f;
                }
            }
            _all_arc_num = f;
        }

        return true;
    }

    // Find the join node
    void findJoinNode() {
        int u = _source[in_arc];
        int v = _target[in_arc];
        while (u != v) {
            if (_succ_num[u] < _succ_num[v]) {
                u = _parent[u];
            }
            else {
                v = _parent[v];
            }
        }
        join = u;
    }

    // Find the leaving arc of the cycle and return true if the
    // leaving arc is not the same as the entering arc
    bool findLeavingArc() {
        // Initialize first and second nodes according to the direction
        // of the cycle
        if (_state[in_arc] == STATE_LOWER) {
            first = _source[in_arc];
            second = _target[in_arc];
        }
        else {
            first = _target[in_arc];
            second = _source[in_arc];
        }
        delta = INF;
        char result = 0;
        Value d;
        ArcsType e;

        // Search the cycle along the path from the first node to the root
        for (int u = first; u != join; u = _parent[u]) {
            e = _pred[u];
            d = _forward[u] ? _flow[e] : INF;
            if (d < delta) {
                delta = d;
                u_out = u;
                result = 1;
            }
        }
        // Search the cycle along the path from the second node to the root
        for (int u = second; u != join; u = _parent[u]) {
            e = _pred[u];
            d = _forward[u] ? INF : _flow[e];
            if (d <= delta) {
                delta = d;
                u_out = u;
                result = 2;
            }
        }

        if (result == 1) {
            u_in = first;
            v_in = second;
        }
        else {
            u_in = second;
            v_in = first;
        }
        return result != 0;
    }

    // Change _flow and _state vectors
    void changeFlow(bool change) {
        // Augment along the cycle
        if (delta > 0) {
            Value val = _state[in_arc] * delta;
            _flow[in_arc] += val;
            for (int u = _source[in_arc]; u != join; u = _parent[u]) {
                _flow[_pred[u]] += _forward[u] ? -val : val;
            }
            for (int u = _target[in_arc]; u != join; u = _parent[u]) {
                _flow[_pred[u]] += _forward[u] ? val : -val;
            }
        }
        // Update the state of the entering and leaving arcs
        if (change) {
            _state[in_arc] = STATE_TREE;
            _state[_pred[u_out]] = (_flow[_pred[u_out]] == 0) ?
                STATE_LOWER : STATE_UPPER;
        }
        else {
            _state[in_arc] = -_state[in_arc];
        }
    }

    // Update the tree structure
    void updateTreeStructure() {
        int old_rev_thread = _rev_thread[u_out];
        int old_succ_num = _succ_num[u_out];
        int old_last_succ = _last_succ[u_out];
        v_out = _parent[u_out];

        // Check if u_in and u_out coincide
        if (u_in == u_out) {
            // Update _parent, _pred, _pred_dir
            _parent[u_in] = v_in;
            _pred[u_in] = in_arc;
            _forward[u_in] = (u_in == _source[in_arc]);

            // Update _thread and _rev_thread
            if (_thread[v_in] != u_out) {
                ArcsType after = _thread[old_last_succ];
                _thread[old_rev_thread] = after;
                _rev_thread[after] = old_rev_thread;
                after = _thread[v_in];
                _thread[v_in] = u_out;
                _rev_thread[u_out] = v_in;
                _thread[old_last_succ] = after;
                _rev_thread[after] = old_last_succ;
            }
        }
        else {
            // Handle the case when old_rev_thread equals v_in
            // (it also means that join and v_out coincide)
            int thread_continue = old_rev_thread == v_in ?
                _thread[old_last_succ] : _thread[v_in];

            // Update _thread and _parent along the stem nodes (i.e. the nodes
            // between u_in and u_out, whose parents have to be changed)
            int stem = u_in;              // the current stem node
            int par_stem = v_in;          // the new parent of stem
            int next_stem;                // the next stem node
            int last = _last_succ[u_in];  // the last successor of stem
            int before, after = _thread[last];
            _thread[v_in] = u_in;
            _dirty_revs.clear();
            _dirty_revs.push_back(v_in);
            while (stem != u_out) {
                // Insert the next stem node into the thread list
                next_stem = _parent[stem];
                _thread[last] = next_stem;
                _dirty_revs.push_back(last);

                // Remove the subtree of stem from the thread list
                before = _rev_thread[stem];
                _thread[before] = after;
                _rev_thread[after] = before;

                // Change the parent node and shift stem nodes
                _parent[stem] = par_stem;
                par_stem = stem;
                stem = next_stem;

                // Update last and after
                last = _last_succ[stem] == _last_succ[par_stem] ?
                    _rev_thread[par_stem] : _last_succ[stem];
                after = _thread[last];
            }
            _parent[u_out] = par_stem;
            _thread[last] = thread_continue;
            _rev_thread[thread_continue] = last;
            _last_succ[u_out] = last;

            // Remove the subtree of u_out from the thread list except for
            // the case when old_rev_thread equals v_in
            if (old_rev_thread != v_in) {
                _thread[old_rev_thread] = after;
                _rev_thread[after] = old_rev_thread;
            }

            // Update _rev_thread using the new _thread values
            for (int i = 0; i != int(_dirty_revs.size()); ++i) {
                int u = _dirty_revs[i];
                _rev_thread[_thread[u]] = u;
            }

            // Update _pred, _pred_dir, _last_succ and _succ_num for the
            // stem nodes from u_out to u_in
            int tmp_sc = 0, tmp_ls = _last_succ[u_out];
            for (int u = u_out, p = _parent[u]; u != u_in; u = p, p = _parent[u]) {
                _pred[u] = _pred[p];
                _forward[u] = !_forward[p];
                tmp_sc += _succ_num[u] - _succ_num[p];
                _succ_num[u] = tmp_sc;
                _last_succ[p] = tmp_ls;
            }
            _pred[u_in] = in_arc;
            _forward[u_in] = (u_in == _source[in_arc]);
            _succ_num[u_in] = old_succ_num;
        }

        // Update _last_succ from v_in towards the root
        int up_limit_out = _last_succ[join] == v_in ?
            join : -1;
        int last_succ_out = _last_succ[u_out];
        for (int u = v_in; u != -1 && _last_succ[u] == v_in; u = _parent[u]) {
            _last_succ[u] = last_succ_out;
        }

        // Update _last_succ from v_out towards the root
        if (join != old_rev_thread && v_in != old_rev_thread) {
            for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
                u = _parent[u]) {
                _last_succ[u] = old_rev_thread;
            }
        }
        else if (last_succ_out != old_last_succ) {
            for (int u = v_out; u != up_limit_out && _last_succ[u] == old_last_succ;
                u = _parent[u]) {
                _last_succ[u] = last_succ_out;
            }
        }

        // Update _succ_num from v_in to join
        for (int u = v_in; u != join; u = _parent[u]) {
            _succ_num[u] += old_succ_num;
        }
        // Update _succ_num from v_out to join
        for (int u = v_out; u != join; u = _parent[u]) {
            _succ_num[u] -= old_succ_num;
        }
    }

    void updatePotential() {
        Cost sigma = _pi[v_in] - _pi[u_in] -
            ((_forward[u_in]) ? _cost[in_arc] : (-_cost[in_arc]));
        int end = _thread[_last_succ[u_in]];
        for (int u = u_in; u != end; u = _thread[u]) {
            _pi[u] += sigma;
        }
    }

    // Heuristic initial pivots
    bool initialPivots() {
        Value curr, total = 0;
        std::vector<Node> supply_nodes, demand_nodes;
        Node u;
        _graph.first(u);
        for (; u != INVALIDNODE; _graph.next(u)) {
            curr = _supply[_node_id(u)];
            if (curr > 0) {
                total += curr;
                supply_nodes.push_back(u);
            }
            else if (curr < 0) {
                demand_nodes.push_back(u);
            }
        }
        if (_sum_supply > 0) total -= _sum_supply;
        if (total <= 0) return true;

        ArcVector arc_vector;
        if (_sum_supply >= 0) {
            if (supply_nodes.size() == 1 && demand_nodes.size() == 1) {
                // Perform a reverse graph search from the sink to the source
                //typename GR::template NodeMap<bool> reached(_graph, false);
                BoolVector reached(_node_num, false);
                Node s = supply_nodes[0], t = demand_nodes[0];
                std::vector<Node> stack;
                reached[t] = true;
                stack.push_back(t);
                while (!stack.empty()) {
                    Node u, v = stack.back();
                    stack.pop_back();
                    if (v == s) break;
                    Arc a;
                    _graph.firstIn(a, v);
                    for (; a != INVALID; _graph.nextIn(a)) {
                        if (reached[u = _graph.source(a)]) continue;
                        ArcsType j = getArcID(a);
                        arc_vector.push_back(j);
                        reached[u] = true;
                        stack.push_back(u);
                    }
                }
            }
            else {
                arc_vector.resize(demand_nodes.size());
                // Find the min. cost incoming arc for each demand node
#pragma omp parallel for
                for (ArcsType i = 0; i < ArcsType(demand_nodes.size()); ++i) {
                    Node v = demand_nodes[i];
                    Cost min_cost = std::numeric_limits<Cost>::max();
                    Arc min_arc = INVALID;
                    Arc a;
                    _graph.firstIn(a, v);
                    for (; a != INVALID; _graph.nextIn(a)) {
                        Cost c = _cost[getArcID(a)];
                        if (c < min_cost) {
                            min_cost = c;
                            min_arc = a;
                        }
                    }
                    arc_vector[i] = getArcID(min_arc);
                }
                arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID),
                    arc_vector.end());
            }
        }
        else {
            arc_vector.resize(supply_nodes.size());
            // Find the min.
cost outgoing arc for each supply node #pragma omp parallel for for (int i = 0; i < int(supply_nodes.size()); ++i) { Node u = supply_nodes[i]; Cost min_cost = std::numeric_limits<Cost>::max(); Arc min_arc = INVALID; Arc a; _graph.firstOut(a, u); for (; a != INVALID; _graph.nextOut(a)) { Cost c = _cost[getArcID(a)]; if (c < min_cost) { min_cost = c; min_arc = a; } } arc_vector[i] = getArcID(min_arc); } arc_vector.erase(std::remove(arc_vector.begin(), arc_vector.end(), INVALID), arc_vector.end()); } // Perform heuristic initial pivots for (ArcsType i = 0; i != ArcsType(arc_vector.size()); ++i) { in_arc = arc_vector[i]; if (_state[in_arc] * (_cost[in_arc] + _pi[_source[in_arc]] - _pi[_target[in_arc]]) >= 0) continue; findJoinNode(); bool change = findLeavingArc(); if (delta >= MAX) { return false; } changeFlow(change); if (change) { updateTreeStructure(); updatePotential(); } } return true; } // Execute the algorithm ProblemType start() { return start<BlockSearchPivotRule>(); } template <typename PivotRuleImpl> ProblemType start() { PivotRuleImpl pivot(*this); // Perform heuristic initial pivots if (!initialPivots()) return UNBOUNDED; size_t iter_number = 0; // Execute the Network Simplex algorithm while (pivot.findEnteringArc()) { if ((iter_number <= max_iter&&max_iter > 0) || max_iter<=0) { iter_number++; findJoinNode(); bool change = findLeavingArc(); if (delta >= MAX) return UNBOUNDED; changeFlow(change); if (change) { updateTreeStructure(); updatePotential(); } } else break; } // Check feasibility for (ArcsType e = _search_arc_num; e != _all_arc_num; ++e) { if (_flow[e] != 0) return INFEASIBLE; } // Shift potentials to meet the requirements of the GEQ/LEQ type // optimality conditions if (_sum_supply == 0) { if (_stype == GEQ) { Cost max_pot = -std::numeric_limits<Cost>::max(); for (ArcsType i = 0; i != _node_num; ++i) { if (_pi[i] > max_pot) max_pot = _pi[i]; } if (max_pot > 0) { for (ArcsType i = 0; i != _node_num; ++i) _pi[i] -= max_pot; } } else { Cost min_pot = std::numeric_limits<Cost>::max(); for (ArcsType i = 0; i != _node_num; ++i) { if (_pi[i] < min_pot) min_pot = _pi[i]; } if (min_pot < 0) { for (ArcsType i = 0; i != _node_num; ++i) _pi[i] -= min_pot; } } } return OPTIMAL; } }; //class NetworkSimplexSimple ///@} } //namespace lemon #endif //LEMON_NETWORK_SIMPLEX_H
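
// ---------------------------------------------------------------------------
// Hedged usage sketch (illustration only, not part of the header above): one
// plausible way to drive NetworkSimplexSimple for a small dense transport
// problem. `FullBipartiteDigraph`, the constructor argument order and
// `arcFromId` are assumptions about the companion graph class used with this
// solver; verify them against your instantiation before reusing this.
//
//   typedef FullBipartiteDigraph Digraph;
//   Digraph di(n1, n2);  // n1 supply nodes, n2 demand nodes
//   lemon::NetworkSimplexSimple<Digraph, double, double, int64_t>
//       net(di, true /*arc mixing*/, n1 + n2, (int64_t)n1 * n2, max_iter);
//
//   // supplies are positive, demands are passed negated (GEQ form)
//   net.supplyMap(supply_weights, n1, neg_demand_weights, n2);
//   for (int i = 0; i < n1; ++i)
//       for (int j = 0; j < n2; ++j)
//           net.setCost(di.arcFromId(i * n2 + j), cost_matrix[i * n2 + j]);
//
//   if (net.run() == net.OPTIMAL) {
//       double total = net.totalCost();
//   }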
softmax_layer.c
#include "softmax_layer.h" #include "blas.h" #include "dark_cuda.h" #include "utils.h" #include "blas.h" #include <float.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <assert.h> #define SECRET_NUM -1234 void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output) { int b; for (b = 0; b < batch; ++b) { int i; int count = 0; for (i = 0; i < hierarchy->groups; ++i) { int group_size = hierarchy->group_size[i]; softmax(input + b*inputs + count, group_size, temp, output + b*inputs + count, 1); count += group_size; } } } softmax_layer make_softmax_layer(int batch, int inputs, int groups) { assert(inputs%groups == 0); fprintf(stderr, "softmax %4d\n", inputs); softmax_layer l = { (LAYER_TYPE)0 }; l.type = SOFTMAX; l.batch = batch; l.groups = groups; l.inputs = inputs; l.outputs = inputs; l.loss = (float*)xcalloc(inputs * batch, sizeof(float)); l.output = (float*)xcalloc(inputs * batch, sizeof(float)); l.delta = (float*)xcalloc(inputs * batch, sizeof(float)); l.cost = (float*)xcalloc(1, sizeof(float)); l.forward = forward_softmax_layer; l.backward = backward_softmax_layer; #ifdef GPU l.forward_gpu = forward_softmax_layer_gpu; l.backward_gpu = backward_softmax_layer_gpu; l.output_gpu = cuda_make_array(l.output, inputs*batch); l.loss_gpu = cuda_make_array(l.loss, inputs*batch); l.delta_gpu = cuda_make_array(l.delta, inputs*batch); #endif return l; } void forward_softmax_layer(const softmax_layer l, network_state net) { if(l.softmax_tree){ int i; int count = 0; for (i = 0; i < l.softmax_tree->groups; ++i) { int group_size = l.softmax_tree->group_size[i]; softmax_cpu(net.input + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output + count); count += group_size; } } else { softmax_cpu(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output); } if(net.truth && !l.noloss){ softmax_x_ent_cpu(l.batch*l.inputs, l.output, net.truth, l.delta, l.loss); l.cost[0] = sum_array(l.loss, l.batch*l.inputs); } } void backward_softmax_layer(const softmax_layer l, network_state net) { axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1); } #ifdef GPU void pull_softmax_layer_output(const softmax_layer layer) { cuda_pull_array(layer.output_gpu, layer.output, layer.inputs*layer.batch); } void forward_softmax_layer_gpu(const softmax_layer l, network_state net) { if(l.softmax_tree){ softmax_tree_gpu(net.input, 1, l.batch, l.inputs, l.temperature, l.output_gpu, *l.softmax_tree); /* int i; int count = 0; for (i = 0; i < l.softmax_tree->groups; ++i) { int group_size = l.softmax_tree->group_size[i]; softmax_gpu(net.input_gpu + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output_gpu + count); count += group_size; } */ } else { if(l.spatial){ softmax_gpu_new_api(net.input, l.c, l.batch*l.c, l.inputs/l.c, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu); }else{ softmax_gpu_new_api(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output_gpu); } } if(net.truth && !l.noloss){ softmax_x_ent_gpu(l.batch*l.inputs, l.output_gpu, net.truth, l.delta_gpu, l.loss_gpu); if(l.softmax_tree){ mask_gpu_new_api(l.batch*l.inputs, l.delta_gpu, SECRET_NUM, net.truth, 0); mask_gpu_new_api(l.batch*l.inputs, l.loss_gpu, SECRET_NUM, net.truth, 0); } cuda_pull_array(l.loss_gpu, l.loss, l.batch*l.inputs); l.cost[0] = sum_array(l.loss, l.batch*l.inputs); } } void backward_softmax_layer_gpu(const softmax_layer layer, network_state state) { axpy_ongpu(layer.batch*layer.inputs, 
        state.net.loss_scale, layer.delta_gpu, 1, state.delta, 1);
}
#endif

// -------------------------------------

// Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf

contrastive_layer make_contrastive_layer(int batch, int w, int h, int c, int classes, int inputs, layer *yolo_layer)
{
    contrastive_layer l = { (LAYER_TYPE)0 };
    l.type = CONTRASTIVE;
    l.batch = batch;
    l.inputs = inputs;
    l.w = w;
    l.h = h;
    l.c = c;
    l.temperature = 1;

    l.max_boxes = 0;
    if (yolo_layer) {
        l.detection = 1;
        l.max_boxes = yolo_layer->max_boxes;
        l.labels = yolo_layer->labels;          // track id
        l.class_ids = yolo_layer->class_ids;    // class_ids
        l.n = yolo_layer->n;                    // num of embeddings per cell = num of anchors
        l.classes = yolo_layer->classes;        // num of classes
        classes = l.classes;
        l.embedding_size = l.inputs / (l.n*l.h*l.w);
        l.truths = yolo_layer->truths;
        if (l.embedding_size != yolo_layer->embedding_size) {
            printf(" Error: [contrastive] embedding_size=%d isn't equal to [yolo] embedding_size=%d. They should use the same [convolutional] layer \n",
                l.embedding_size, yolo_layer->embedding_size);
            getchar();
            exit(0);
        }
        if (l.inputs % (l.n*l.h*l.w) != 0) {
            printf(" Warning: filters= number in the previous (embedding) layer isn't divisible by number of anchors %d \n", l.n);
            getchar();
        }
    }
    else {
        l.detection = 0;
        l.labels = (int*)xcalloc(l.batch, sizeof(int));   // labels
        l.n = 1;                // num of embeddings per cell
        l.classes = classes;    // num of classes
        l.embedding_size = l.c;
    }
    l.outputs = inputs;

    l.loss = (float*)xcalloc(1, sizeof(float));
    l.output = (float*)xcalloc(inputs * batch, sizeof(float));
    l.delta = (float*)xcalloc(inputs * batch, sizeof(float));
    l.cost = (float*)xcalloc(1, sizeof(float));

    const size_t step = l.batch*l.n*l.h*l.w;
    l.cos_sim = NULL;
    l.exp_cos_sim = NULL;
    l.p_constrastive = NULL;
    if (!l.detection) {
        l.cos_sim = (float*)xcalloc(step*step, sizeof(float));
        l.exp_cos_sim = (float*)xcalloc(step*step, sizeof(float));
        l.p_constrastive = (float*)xcalloc(step*step, sizeof(float));
    }

    //l.p_constrastive = (float*)xcalloc(step*step, sizeof(float));
    //l.contrast_p_size = (int*)xcalloc(1, sizeof(int));
    //*l.contrast_p_size = step;
    //l.contrast_p = (contrastive_params*)xcalloc(*l.contrast_p_size, sizeof(contrastive_params));

    l.forward = forward_contrastive_layer;
    l.backward = backward_contrastive_layer;
#ifdef GPU
    l.forward_gpu = forward_contrastive_layer_gpu;
    l.backward_gpu = backward_contrastive_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, inputs*batch);
    l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
    // measured in float elements (4 bytes each), since cuda_make_array()
    // allocates float-sized elements
    const int max_contr_size = (l.max_boxes*l.batch)*(l.max_boxes*l.batch) * sizeof(contrastive_params) / 4;
    printf(" max_contr_size = %d MB \n", max_contr_size / (1024*1024));
    l.contrast_p_gpu = (contrastive_params *)cuda_make_array(NULL, max_contr_size);
#endif
    fprintf(stderr, "contrastive %4d x%4d x%4d x emb_size %4d x batch: %4d classes = %4d, step = %4zu \n",
        w, h, l.n, l.embedding_size, batch, l.classes, step);
    if (l.detection) fprintf(stderr, "detection \n");
    return l;
}

static inline float clip_value(float val, const float max_val)
{
    if (val > max_val) {
        //printf("\n val = %f > max_val = %f \n", val, max_val);
        val = max_val;
    }
    else if (val < -max_val) {
        //printf("\n val = %f < -max_val = %f \n", val, -max_val);
        val = -max_val;
    }
    return val;
}

void forward_contrastive_layer(contrastive_layer l, network_state state)
{
    if (!state.train) return;
    const float truth_thresh = state.net.label_smooth_eps;
    const int mini_batch = l.batch / l.steps;

    int b, n, w, h;
    fill_cpu(l.batch*l.inputs, 0, l.delta, 1);

    if (!l.detection) {
        for (b = 0; b < l.batch; ++b) {
            if (state.net.adversarial) l.labels[b] = b % 2;
            else l.labels[b] = b / 2;
        }

        // set labels
        for (b = 0; b < l.batch; ++b) {
            for (h = 0; h < l.h; ++h) {
                for (w = 0; w < l.w; ++w) {
                    // find truth with max prob (only 1 label even if mosaic is used)
                    float max_truth = 0;
                    int n;
                    for (n = 0; n < l.classes; ++n) {
                        const float truth_prob = state.truth[b*l.classes + n];
                        //printf(" truth_prob = %f, ", truth_prob);
                        //if (truth_prob > max_truth)
                        if (truth_prob > truth_thresh) {
                            //printf(" truth_prob = %f, max_truth = %f, n = %d; ", truth_prob, max_truth, n);
                            max_truth = truth_prob;
                            l.labels[b] = n;
                        }
                    }
                    //printf(", l.labels[b] = %d ", l.labels[b]);
                }
            }
        }
    }
    //printf("\n\n");

    // set pointers to features
    float **z = (float**)xcalloc(l.batch*l.n*l.h*l.w, sizeof(float*));
    for (b = 0; b < l.batch; ++b) {
        for (n = 0; n < l.n; ++n) {
            for (h = 0; h < l.h; ++h) {
                for (w = 0; w < l.w; ++w) {
                    const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
                    if (l.labels[z_index] < 0) continue;
                    //const int input_index = b*l.inputs + n*l.embedding_size*l.h*l.w + h*l.w + w;
                    //float *ptr = state.input + input_index;
                    //z[z_index] = ptr;
                    z[z_index] = (float*)xcalloc(l.embedding_size, sizeof(float));
                    get_embedding(state.input, l.w, l.h, l.c, l.embedding_size, w, h, n, b, z[z_index]);
                }
            }
        }
    }

    int b2, n2, h2, w2;
    int contrast_p_index = 0;
    const size_t step = l.batch*l.n*l.h*l.w;
    size_t contrast_p_size = step;
    if (!l.detection) contrast_p_size = l.batch*l.batch;
    contrastive_params *contrast_p = (contrastive_params*)xcalloc(contrast_p_size, sizeof(contrastive_params));

    float *max_sim_same = (float *)xcalloc(l.batch*l.inputs, sizeof(float));
    float *max_sim_diff = (float *)xcalloc(l.batch*l.inputs, sizeof(float));
    fill_cpu(l.batch*l.inputs, -10, max_sim_same, 1);
    fill_cpu(l.batch*l.inputs, -10, max_sim_diff, 1);

    // precalculate cosine similarity
    for (b = 0; b < l.batch; ++b) {
        for (n = 0; n < l.n; ++n) {
            for (h = 0; h < l.h; ++h) {
                for (w = 0; w < l.w; ++w) {
                    const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w;
                    if (l.labels[z_index] < 0) continue;

                    for (b2 = 0; b2 < l.batch; ++b2) {
                        for (n2 = 0; n2 < l.n; ++n2) {
                            for (h2 = 0; h2 < l.h; ++h2) {
                                for (w2 = 0; w2 < l.w; ++w2) {
                                    const int z_index2 = b2*l.n*l.h*l.w + n2*l.h*l.w + h2*l.w + w2;
                                    if (l.labels[z_index2] < 0) continue;
                                    if (z_index == z_index2) continue;
                                    if (l.detection)
                                        if (l.class_ids[z_index] != l.class_ids[z_index2]) continue;

                                    const int time_step_i = b / mini_batch;
                                    const int time_step_j = b2 / mini_batch;
                                    if (time_step_i != time_step_j) continue;

                                    const size_t step = l.batch*l.n*l.h*l.w;
                                    const float sim = cosine_similarity(z[z_index], z[z_index2], l.embedding_size);
                                    const float exp_sim = expf(sim / l.temperature);
                                    if (!l.detection) {
                                        l.cos_sim[z_index*step + z_index2] = sim;
                                        l.exp_cos_sim[z_index*step + z_index2] = exp_sim;
                                    }

                                    // calc good sim
                                    if (l.labels[z_index] == l.labels[z_index2] && max_sim_same[z_index] < sim) max_sim_same[z_index] = sim;
                                    if (l.labels[z_index] != l.labels[z_index2] && max_sim_diff[z_index] < sim) max_sim_diff[z_index] = sim;
                                    //printf(" z_i = %d, z_i2 = %d, l = %d, l2 = %d, sim = %f \n", z_index, z_index2, l.labels[z_index], l.labels[z_index2], sim);

                                    contrast_p[contrast_p_index].sim = sim;
                                    contrast_p[contrast_p_index].exp_sim = exp_sim;
                                    contrast_p[contrast_p_index].i = z_index;
                                    contrast_p[contrast_p_index].j = z_index2;
                                    contrast_p[contrast_p_index].time_step_i = time_step_i;
                                    contrast_p[contrast_p_index].time_step_j = time_step_j;
                                    contrast_p_index++;
                                    //printf(" contrast_p_index = %d, contrast_p_size = %d
\n", contrast_p_index, contrast_p_size); if ((contrast_p_index+1) >= contrast_p_size) { contrast_p_size = contrast_p_index + 1; //printf(" contrast_p_size = %d, z_index = %d, z_index2 = %d \n", contrast_p_size, z_index, z_index2); contrast_p = (contrastive_params*)xrealloc(contrast_p, contrast_p_size * sizeof(contrastive_params)); } if (sim > 1.001 || sim < -1.001) { printf(" sim = %f, ", sim); getchar(); } } } } } } } } } // calc contrastive accuracy int i; int good_sims = 0, all_sims = 0, same_sim = 0, diff_sim = 0; for (i = 0; i < l.batch*l.inputs; ++i) { if (max_sim_same[i] >= -1 && max_sim_diff[i] >= -1) { if (max_sim_same[i] >= -1) same_sim++; if (max_sim_diff[i] >= -1) diff_sim++; ++all_sims; //printf(" max_sim_diff[i] = %f, max_sim_same[i] = %f \n", max_sim_diff[i], max_sim_same[i]); if (max_sim_diff[i] < max_sim_same[i]) good_sims++; } } if (all_sims > 0) { *l.loss = 100 * good_sims / all_sims; } else *l.loss = -1; printf(" Contrast accuracy = %f %%, all = %d, good = %d, same = %d, diff = %d \n", *l.loss, all_sims, good_sims, same_sim, diff_sim); free(max_sim_same); free(max_sim_diff); /* // show near sim float good_contrast = 0; for (b = 0; b < l.batch; b += 2) { float same = l.cos_sim[b*l.batch + b]; float aug = l.cos_sim[b*l.batch + b + 1]; float diff = l.cos_sim[b*l.batch + b + 2]; good_contrast += (aug > diff); //printf(" l.labels[b] = %d, l.labels[b+1] = %d, l.labels[b+2] = %d, b = %d \n", l.labels[b], l.labels[b + 1], l.labels[b + 2], b); //printf(" same = %f, aug = %f, diff = %f, (aug > diff) = %d \n", same, aug, diff, (aug > diff)); } *l.loss = 100 * good_contrast / (l.batch / 2); printf(" Contrast accuracy = %f %% \n", *l.loss); */ /* // precalculate P_contrastive for (b = 0; b < l.batch; ++b) { int b2; for (b2 = 0; b2 < l.batch; ++b2) { if (b != b2) { const float P = P_constrastive(b, b2, l.labels, l.batch, z, l.embedding_size, l.temperature, l.cos_sim); l.p_constrastive[b*l.batch + b2] = P; if (P > 1 || P < -1) { printf(" p = %f, ", P); getchar(); } } } } */ const size_t contr_size = contrast_p_index; if (l.detection) { #ifdef GPU const int max_contr_size = (l.max_boxes*l.batch)*(l.max_boxes*l.batch); if (max_contr_size < contr_size) { printf(" Error: too large number of bboxes: contr_size = %d > max_contr_size = %d \n", contr_size, max_contr_size); exit(0); } int *labels = NULL; if (contr_size > 2) { cuda_push_array((float *)l.contrast_p_gpu, (float *)contrast_p, contr_size * sizeof(contrastive_params) / 4); P_constrastive_f_det_gpu(labels, l.embedding_size, l.temperature, l.contrast_p_gpu, contr_size); cuda_pull_array((float *)l.contrast_p_gpu, (float *)contrast_p, contr_size * sizeof(contrastive_params) / 4); } #else // GPU int k; //#pragma omp parallel for for (k = 0; k < contr_size; ++k) { contrast_p[k].P = P_constrastive_f_det(k, l.labels, z, l.embedding_size, l.temperature, contrast_p, contr_size); } #endif // GPU } else { // precalculate P-contrastive for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { for (h = 0; h < l.h; ++h) { for (w = 0; w < l.w; ++w) { const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w; if (l.labels[z_index] < 0) continue; for (b2 = 0; b2 < l.batch; ++b2) { for (n2 = 0; n2 < l.n; ++n2) { for (h2 = 0; h2 < l.h; ++h2) { for (w2 = 0; w2 < l.w; ++w2) { const int z_index2 = b2*l.n*l.h*l.w + n2*l.h*l.w + h2*l.w + w2; if (l.labels[z_index2] < 0) continue; if (z_index == z_index2) continue; if (l.detection) if (l.class_ids[z_index] != l.class_ids[z_index2]) continue; const int time_step_i = b / mini_batch; const int time_step_j = b2 
/ mini_batch; if (time_step_i != time_step_j) continue; const size_t step = l.batch*l.n*l.h*l.w; float P = -10; if (l.detection) { P = P_constrastive_f(z_index, z_index2, l.labels, z, l.embedding_size, l.temperature, contrast_p, contr_size); } else { P = P_constrastive(z_index, z_index2, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.exp_cos_sim); l.p_constrastive[z_index*step + z_index2] = P; } int q; for (q = 0; q < contr_size; ++q) if (contrast_p[q].i == z_index && contrast_p[q].j == z_index2) { contrast_p[q].P = P; break; } //if (q == contr_size) getchar(); //if (P > 1 || P < -1) { // printf(" p = %f, z_index = %d, z_index2 = %d ", P, z_index, z_index2); getchar(); //} } } } } } } } } } // calc deltas #pragma omp parallel for for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { for (h = 0; h < l.h; ++h) { for (w = 0; w < l.w; ++w) { const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w; const size_t step = l.batch*l.n*l.h*l.w; if (l.labels[z_index] < 0) continue; const int delta_index = b*l.embedding_size*l.n*l.h*l.w + n*l.embedding_size*l.h*l.w + h*l.w + w; const int wh = l.w*l.h; if (l.detection) { // detector // positive grad_contrastive_loss_positive_f(z_index, l.class_ids, l.labels, step, z, l.embedding_size, l.temperature, l.delta + delta_index, wh, contrast_p, contr_size); // negative grad_contrastive_loss_negative_f(z_index, l.class_ids, l.labels, step, z, l.embedding_size, l.temperature, l.delta + delta_index, wh, contrast_p, contr_size, l.contrastive_neg_max); } else { // classifier // positive grad_contrastive_loss_positive(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.p_constrastive, l.delta + delta_index, wh); // negative grad_contrastive_loss_negative(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.p_constrastive, l.delta + delta_index, wh); } } } } } scal_cpu(l.inputs * l.batch, l.cls_normalizer, l.delta, 1); for (i = 0; i < l.inputs * l.batch; ++i) { l.delta[i] = clip_value(l.delta[i], l.max_delta); } *(l.cost) = pow(mag_array(l.delta, l.inputs * l.batch), 2); if (state.net.adversarial) { printf(" adversarial contrastive loss = %f \n\n", *(l.cost)); } else { printf(" contrastive loss = %f \n\n", *(l.cost)); } for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { for (h = 0; h < l.h; ++h) { for (w = 0; w < l.w; ++w) { const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w; //if (l.labels[z_index] < 0) continue; if (z[z_index]) free(z[z_index]); } } } } free(contrast_p); free(z); } void backward_contrastive_layer(contrastive_layer l, network_state state) { axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, state.delta, 1); } #ifdef GPU void pull_contrastive_layer_output(const contrastive_layer l) { cuda_pull_array(l.output_gpu, l.output, l.inputs*l.batch); } void push_contrastive_layer_output(const contrastive_layer l) { cuda_push_array(l.delta_gpu, l.delta, l.inputs*l.batch); } void forward_contrastive_layer_gpu(contrastive_layer l, network_state state) { simple_copy_ongpu(l.batch*l.inputs, state.input, l.output_gpu); if (!state.train) return; float *in_cpu = (float *)xcalloc(l.batch*l.inputs, sizeof(float)); cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs); memcpy(in_cpu, l.output, l.batch*l.outputs * sizeof(float)); float *truth_cpu = 0; if (state.truth) { int num_truth = l.batch*l.classes; if (l.detection) num_truth = l.batch*l.truths; truth_cpu = (float *)xcalloc(num_truth, sizeof(float)); cuda_pull_array(state.truth, truth_cpu, num_truth); } network_state cpu_state = state; 
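    // forward_contrastive_layer() runs on the CPU only, so mirror the fields
    // it reads into a CPU-side network_state and feed it the pulled buffers;
    // the resulting deltas are pushed back to the GPU below.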
cpu_state.net = state.net; cpu_state.index = state.index; cpu_state.train = state.train; cpu_state.truth = truth_cpu; cpu_state.input = in_cpu; forward_contrastive_layer(l, cpu_state); cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs); free(in_cpu); if (cpu_state.truth) free(cpu_state.truth); } void backward_contrastive_layer_gpu(contrastive_layer layer, network_state state) { axpy_ongpu(layer.batch*layer.inputs, state.net.loss_scale, layer.delta_gpu, 1, state.delta, 1); } #endif
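
/* ---------------------------------------------------------------------------
   Hedged reference sketch (illustration only, kept in a comment so it is not
   compiled into the build): the numerically stable softmax-with-temperature
   that softmax()/softmax_cpu() apply to each group above. `softmax_ref` is a
   hypothetical name used only here; <float.h> is already included above.

   static void softmax_ref(const float *input, int n, float temp, float *output)
   {
       int i;
       float largest = -FLT_MAX;
       for (i = 0; i < n; ++i)
           if (input[i] > largest) largest = input[i];

       float sum = 0;
       for (i = 0; i < n; ++i) {
           float e = expf((input[i] - largest) / temp);  // max-shift avoids overflow
           sum += e;
           output[i] = e;
       }
       for (i = 0; i < n; ++i)
           output[i] /= sum;  // probabilities in each group sum to 1
   }
*/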
conv_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#ifdef __NVCC__
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/fluid/operators/math/im2col.h"
#include "paddle/fluid/operators/math/math_function.h"
#else // __NVCC__
#include "./math/im2col.h"
#include "./math/vol2col.h"
#include "./math/math_function.h"
#endif // __NVCC__
#include "mpc_op.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
constexpr int kConvMKLDNNFP32 = 1;
constexpr int kConvMKLDNNINT8 = 2;
constexpr int MaxKeyLength = 256;

// Base convolution operator definitions for other conv-like
// operators to reuse the implementation.
inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
  PADDLE_ENFORCE_GT(
      output_size, 0,
      platform::errors::InvalidArgument(
          "The output's size is expected to be greater than 0. "
          "But received: output's size is %d. The output's size is computed by "
          "((input_size + 2 * padding - (dilation * (filter_size - 1) + 1)) / "
          "stride + 1), where input_size is %d, padding is %d, "
          "filter_size is %d, dilation is %d, stride is %d.",
          output_size, input_size, padding, filter_size, dilation, stride));
  return output_size;
}

inline int ConvOutputSize(int input_size, int filter_size, int dilation,
                          int padding_1, int padding_2, int stride) {
  const int dkernel = dilation * (filter_size - 1) + 1;
  int output_size = (input_size + padding_1 + padding_2 - dkernel) / stride + 1;
  PADDLE_ENFORCE_GT(
      output_size, 0,
      platform::errors::InvalidArgument(
          "The output's size is expected to be greater than 0. "
          "But received: output's size is %d. The output's size is computed by "
          "((input_size + padding_1 + padding_2 - (dilation * (filter_size - "
          "1) + 1)) / stride + 1), where input_size is %d, padding is "
          "(%d, %d), filter_size is %d, dilation is %d, stride is %d.",
          output_size, input_size, padding_1, padding_2, filter_size, dilation,
          stride));
  return output_size;
}

template <typename T = int>
inline void UpdatePaddingAndDilation(std::vector<T>* paddings,
                                     std::vector<T>* dilation,
                                     const std::string& padding_algorithm,
                                     const framework::DDim data_dims,
                                     const std::vector<T>& strides,
                                     const std::vector<T>& ksize) {
  // set padding size == data_dims.size() * 2
  auto data_shape = framework::vectorize<T>(data_dims);
  if (static_cast<int>(paddings->size()) == data_dims.size()) {
    for (int i = 0; i < data_dims.size(); ++i) {
      T copy_pad = *(paddings->begin() + 2 * i);
      paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
    }
  } else {
    PADDLE_ENFORCE_EQ(
        data_dims.size() * 2, paddings->size(),
        platform::errors::InvalidArgument(
            "Attribute padding's size should be the same or twice as the "
            "input's dimension. "
            "But received: padding's size is %d, padding is [%s]; input's "
            "dimension is %d, input's shape is [%s].",
            paddings->size(), framework::make_ddim(*paddings),
            data_dims.size(), data_dims));
  }

  // when padding_algorithm is "VALID" or "SAME"
  if (padding_algorithm == "SAME") {
    for (int i = 0; i < data_dims.size(); ++i) {
      T out_size = (data_dims[i] + strides[i] - 1) / strides[i];
      T pad_sum =
          std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i],
                   static_cast<T>(0));
      T pad_0 = pad_sum / 2;
      T pad_1 = pad_sum - pad_0;
      *(paddings->begin() + i * 2) = pad_0;
      *(paddings->begin() + i * 2 + 1) = pad_1;
      // dilation
      *(dilation->begin() + i) = 1;
    }
  } else if (padding_algorithm == "VALID") {
    for (auto it = paddings->begin(); it != paddings->end(); it++) {
      *it = 0;
    }
  }
}

inline bool IsExpand(const std::vector<int64_t>& filter_dim,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings,
                     const std::vector<int>& dilations) {
  bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true;
  for (size_t j = 0; j < strides.size(); ++j) {
    // extra 1 for share dim
    filter_1 = filter_1 && (static_cast<int>(filter_dim[j + 2 + 1]) == 1);
    strides_1 = strides_1 && (strides[j] == 1);
    padding_0 = padding_0 && (paddings[j] == 0);
    dilation_1 = dilation_1 && (dilations[j] == 1);
  }
  if (paddings.size() != strides.size()) {
    for (size_t j = 0; j < paddings.size(); ++j) {
      padding_0 = padding_0 && (paddings[j] == 0);
    }
  }
  return !(filter_1 && strides_1 && padding_0 && dilation_1);
}

template <typename DeviceContext, typename T>
inline void ResizeToChannelFirst(const framework::ExecutionContext& context,
                                 const Tensor* input,
                                 Tensor* transformed_input,
                                 bool is_output = false) {
  // extra 1 for leading share dim S
  int dim = input->dims().size() - 2 - 1;
  if (dim == 3) {  // input
    transformed_input->Resize(input->dims());

    auto in_dims_vec = framework::vectorize(input->dims());
    if (is_output) {
      // same as paddle: resize the output of the conv op
      // SNDHWC -> SNCDHW
      // all to simulate the plaintext paddle conv op's behavior
      in_dims_vec[0] = input->dims()[0];
      in_dims_vec[1] = input->dims()[1];
      in_dims_vec[2] = input->dims()[5];
      in_dims_vec[3] = input->dims()[2];
      in_dims_vec[4] = input->dims()[3];
      in_dims_vec[5] = input->dims()[4];
    } else {
      // SNDHWC -> NCSDHW
      in_dims_vec[0] = input->dims()[1];
      in_dims_vec[1] = input->dims()[5];
      in_dims_vec[2] = input->dims()[0];
      in_dims_vec[3] = input->dims()[2];
      in_dims_vec[4] = input->dims()[3];
      in_dims_vec[5] = input->dims()[4];
    }
    transformed_input->Resize(framework::make_ddim(in_dims_vec));
    transformed_input->mutable_data<T>(context.GetPlace());
  } else if (dim == 2) {  // input
    transformed_input->Resize(input->dims());

    auto in_dims_vec = framework::vectorize(input->dims());
    if (is_output) {
      in_dims_vec[0] = input->dims()[0];
      in_dims_vec[1] = input->dims()[1];
      in_dims_vec[2] = input->dims()[4];
      in_dims_vec[3] = input->dims()[2];
      in_dims_vec[4] = input->dims()[3];
    } else {
      // SNHWC -> NCSHW
      in_dims_vec[0] = input->dims()[1];
      in_dims_vec[1] = input->dims()[4];
      in_dims_vec[2] = input->dims()[0];
      in_dims_vec[3] = input->dims()[2];
      in_dims_vec[4] = input->dims()[3];
    }
    transformed_input->Resize(framework::make_ddim(in_dims_vec));
    transformed_input->mutable_data<T>(context.GetPlace());
  }
}

template <typename DeviceContext, typename T>
inline void ResizeToChannelLast(const framework::ExecutionContext& context,
                                const Tensor* input,
                                Tensor* transformed_input) {
  // extra 1 for leading share dim S
  int dim = input->dims().size() - 2 - 1;
  if (dim == 3) {  // input
transformed_input->Resize(input->dims()); // NCSDHW -> SNDHWC auto in_dims_vec = framework::vectorize(input->dims()); in_dims_vec[0] = input->dims()[2]; in_dims_vec[1] = input->dims()[0]; in_dims_vec[2] = input->dims()[3]; in_dims_vec[3] = input->dims()[4]; in_dims_vec[4] = input->dims()[5]; in_dims_vec[5] = input->dims()[1]; transformed_input->Resize(framework::make_ddim(in_dims_vec)); transformed_input->mutable_data<T>(context.GetPlace()); } else if (dim == 2) { // input transformed_input->Resize(input->dims()); // NCSHW -> SNHWC auto in_dims_vec = framework::vectorize(input->dims()); in_dims_vec[0] = input->dims()[2]; in_dims_vec[1] = input->dims()[0]; in_dims_vec[2] = input->dims()[3]; in_dims_vec[3] = input->dims()[4]; in_dims_vec[4] = input->dims()[1]; transformed_input->Resize(framework::make_ddim(in_dims_vec)); transformed_input->mutable_data<T>(context.GetPlace()); } } template <typename DeviceContext, typename T> inline void ResizeToShareLast(const framework::ExecutionContext& context, const Tensor* input, Tensor* transformed_input) { transformed_input->Resize(input->dims()); // SNC.. -> NCS.. auto in_dims_vec = framework::vectorize(input->dims()); in_dims_vec[0] = input->dims()[1]; in_dims_vec[1] = input->dims()[2]; in_dims_vec[2] = input->dims()[0]; transformed_input->Resize(framework::make_ddim(in_dims_vec)); transformed_input->mutable_data<T>(context.GetPlace()); } template <typename DeviceContext, typename T> inline void ResizeToShareFirst(const framework::ExecutionContext& context, const Tensor* input, Tensor* transformed_input) { transformed_input->Resize(input->dims()); // NCS.. -> SNC.. auto in_dims_vec = framework::vectorize(input->dims()); in_dims_vec[0] = input->dims()[2]; in_dims_vec[1] = input->dims()[0]; in_dims_vec[2] = input->dims()[1]; transformed_input->Resize(framework::make_ddim(in_dims_vec)); transformed_input->mutable_data<T>(context.GetPlace()); } template <typename DeviceContext, typename T> inline void TransToChannelFirst(const framework::ExecutionContext& context, const Tensor* input, Tensor* transformed_input, bool is_output = false) { // extra 1 for leading share dim // swap share and batch_size int dim = input->dims().size() - 2 - 1; if (dim == 3) { auto& dev_ctx = context.template device_context<DeviceContext>(); std::vector<int> axis; if (is_output) { axis = std::vector<int>{0, 1, 5, 2, 3, 4}; } else { axis = std::vector<int>{1, 5, 0, 2, 3, 4}; } math::Transpose<DeviceContext, T, 6> trans6; trans6(dev_ctx, *input, transformed_input, axis); } else if (dim == 2) { auto& dev_ctx = context.template device_context<DeviceContext>(); std::vector<int> axis{1, 4, 0, 2, 3}; if (is_output) { axis = std::vector<int>{0, 1, 4, 2, 3}; } else { axis = std::vector<int>{1, 4, 0, 2, 3}; } math::Transpose<DeviceContext, T, 5> trans5; trans5(dev_ctx, *input, transformed_input, axis); } } template <typename DeviceContext, typename T> inline void TransToChannelLast(const framework::ExecutionContext& context, const Tensor* input, Tensor* transformed_input) { // extra 1 for leading share dim // swap share and batch_size int dim = input->dims().size() - 2 - 1; if (dim == 3) { auto& dev_ctx = context.template device_context<DeviceContext>(); std::vector<int> axis{0, 1, 3, 4, 5, 2}; math::Transpose<DeviceContext, T, 6> trans6; trans6(dev_ctx, *input, transformed_input, axis); } else if (dim == 2) { auto& dev_ctx = context.template device_context<DeviceContext>(); std::vector<int> axis{0, 1, 3, 4, 2}; math::Transpose<DeviceContext, T, 5> trans5; trans5(dev_ctx, *input, 
transformed_input, axis); } } template <typename DeviceContext, typename T> inline void TransToShareFirst(const framework::ExecutionContext& context, const Tensor* input, Tensor* transformed_input) { int dim = input->dims().size(); PADDLE_ENFORCE_GT( dim, 3, platform::errors::InvalidArgument( "The input's dim is expected to be greater than 4.")); std::vector<int> axis(dim); for (size_t i = 3; i < dim; ++i) { axis[i] = i; } // share axis[0] = 2; // N axis[1] = 0; // C axis[2] = 1; auto& dev_ctx = context.template device_context<DeviceContext>(); switch(dim) { case 4: math::Transpose<DeviceContext, T, 4> trans4; trans4(dev_ctx, *input, transformed_input, axis); break; case 5: math::Transpose<DeviceContext, T, 5> trans5; trans5(dev_ctx, *input, transformed_input, axis); break; case 6: math::Transpose<DeviceContext, T, 6> trans6; trans6(dev_ctx, *input, transformed_input, axis); break; default: PADDLE_ENFORCE_LT( dim, 7, platform::errors::InvalidArgument( "The input's dim greater than 6 not supported yet. ")); } } template <typename DeviceContext, typename T> inline void TransToShareLast(const framework::ExecutionContext& context, const Tensor* input, Tensor* transformed_input) { int dim = input->dims().size(); PADDLE_ENFORCE_GT( dim, 4, platform::errors::InvalidArgument( "The input's dim is expected to be greater than 4.")); std::vector<int> axis(dim); for (size_t i = 3; i < dim; ++i) { axis[i] = i; } // SNC -> NCS axis[0] = 1; axis[1] = 2; axis[2] = 0; auto& dev_ctx = context.template device_context<DeviceContext>(); switch(dim) { case 5: math::Transpose<DeviceContext, T, 5> trans5; trans5(dev_ctx, *input, transformed_input, axis); break; case 6: math::Transpose<DeviceContext, T, 6> trans6; trans6(dev_ctx, *input, transformed_input, axis); break; default: PADDLE_ENFORCE_LT( dim, 7, platform::errors::InvalidArgument( "The input's dim greater than 6 not supported yet. ")); } } template <typename DeviceContext, typename T> inline void TransToBatchFirst(const framework::ExecutionContext& context, const Tensor* input, Tensor* transformed_input) { int dim = input->dims().size(); PADDLE_ENFORCE_GT( dim, 4, platform::errors::InvalidArgument( "The input's dim is expected to be greater than 4.")); std::vector<int> axis(dim); for (size_t i = 3; i < dim; ++i) { axis[i] = i; } // N axis[0] = 1; // C axis[1] = 2; // share axis[2] = 0; auto& dev_ctx = context.template device_context<DeviceContext>(); switch(dim) { case 5: math::Transpose<DeviceContext, T, 5> trans5; trans5(dev_ctx, *input, transformed_input, axis); break; case 6: math::Transpose<DeviceContext, T, 6> trans6; trans6(dev_ctx, *input, transformed_input, axis); break; default: PADDLE_ENFORCE_LT( dim, 7, platform::errors::InvalidArgument( "The input's dim greater than 6 not supported yet. ")); } } template <typename DeviceContext, typename T> inline void ResizeToSwapedLeadingDims(const framework::ExecutionContext& context, const Tensor* input, Tensor* transformed_input) { transformed_input->Resize(input->dims()); // NS.. -> SN.. // or CS.. -> SC.. 
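  // Note: the Resize* helpers in this file only set up the destination's
  // dims and allocate memory; the matching Trans* helpers (e.g.
  // TransToSwapedLeadingDims below) perform the actual data movement.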
auto in_dims_vec = framework::vectorize(input->dims()); in_dims_vec[0] = input->dims()[1]; in_dims_vec[1] = input->dims()[0]; transformed_input->Resize(framework::make_ddim(in_dims_vec)); transformed_input->mutable_data<T>(context.GetPlace()); } template <typename DeviceContext, typename T> void TransToSwapedLeadingDims(const framework::ExecutionContext& context, const Tensor* input, Tensor* output){ output->Resize(input->dims()); auto in_dims_vec = framework::vectorize(input->dims()); in_dims_vec[0] = input->dims()[1]; in_dims_vec[1] = input->dims()[0]; output->Resize(framework::make_ddim(in_dims_vec)); output->mutable_data<T>(context.GetPlace()); const int dim = input->dims().size(); std::vector<int> axis(dim); for (size_t i = 0; i < dim; ++i) { axis[i] = i; } axis[0] = 1; axis[1] = 0; auto& dev_ctx = context.template device_context<DeviceContext>(); switch(dim) { case 3: math::Transpose<DeviceContext, T, 3> trans3; trans3(dev_ctx, *input, output, axis); break; case 4: math::Transpose<DeviceContext, T, 4> trans4; trans4(dev_ctx, *input, output, axis); break; case 5: math::Transpose<DeviceContext, T, 5> trans5; trans5(dev_ctx, *input, output, axis); break; case 6: math::Transpose<DeviceContext, T, 6> trans6; trans6(dev_ctx, *input, output, axis); break; default: PADDLE_ENFORCE_GT( dim, 2, platform::errors::InvalidArgument( "The input's dim less than 3 not supported yet. ")); PADDLE_ENFORCE_LT( dim, 7, platform::errors::InvalidArgument( "The input's dim greater than 6 not supported yet. ")); } return; } template <typename DeviceContext, typename T, typename Func> void SharesToCols(const framework::ExecutionContext& context, const Tensor* input, const std::vector<int>& dilations, const std::vector<int>& strides, const std::vector<int>& paddings, Tensor* col, Func data2col) { // // input: CSHW or CSDHW, S for share dim framework::DDim in_plain_dim = framework::slice_ddim(input->dims(), 1, input->dims().size()); framework::DDim col_plain_dim = framework::slice_ddim(col->dims(), 1, col->dims().size()); auto& dev_ctx = context.template device_context<DeviceContext>(); const int share_size = input->dims()[0]; for (size_t i = 0; i < share_size; ++i) { Tensor share = input->Slice(i, i + 1).Resize(in_plain_dim); Tensor col_share = col->Slice(i, i + 1).Resize(col_plain_dim); data2col(dev_ctx, share, dilations, strides, paddings, &col_share); } } template <typename DeviceContext, typename T> Tensor SwapedLeadingDims(const framework::ExecutionContext& context, const Tensor* input) { Tensor output(input->type()); ResizeToSwapedLeadingDims<DeviceContext, T>(context, input, &output); TransToSwapedLeadingDims<DeviceContext, T>(context, input, &output); return output; } template <typename DeviceContext, typename T> Tensor TransposeMpcMat(const framework::ExecutionContext& context, const Tensor* input) { Tensor output(input->type()); auto in_dims_vec = framework::vectorize(input->dims()); PADDLE_ENFORCE_EQ( in_dims_vec.size(), 3, platform::errors::InvalidArgument( "The input's dim should be 3. 
")); in_dims_vec[0] = input->dims()[0]; in_dims_vec[1] = input->dims()[2]; in_dims_vec[2] = input->dims()[1]; output.Resize(framework::make_ddim(in_dims_vec)); output.mutable_data<T>(context.GetPlace()); std::vector<int> axis(3); axis[0] = 0; axis[1] = 2; axis[2] = 1; auto& dev_ctx = context.template device_context<DeviceContext>(); math::Transpose<DeviceContext, T, 3> trans3; trans3(dev_ctx, *input, &output, axis); return output; } // Define Op classes in .h file so that other conv // operator implementations can reuse the code. class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() final; protected: virtual void Apply() {} }; class ConvOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput { protected: std::unordered_map<std::string, std::string>& GetInputOutputWithSameType() const override { static std::unordered_map<std::string, std::string> m{ {"Input", /*->*/ "Output"}}; return m; } }; template <typename DeviceContext, typename T> struct CopyData { void operator()(T* dst, const T* src, size_t numel); }; class ConvOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { std::vector<int64_t> output_shape = ComputeOutputShape(ctx); OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "Conv"); ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); ctx->ShareLoD("Input", "Output"); } protected: std::vector<int64_t> ComputeOutputShape( framework::InferShapeContext* ctx) const; framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override; framework::OpKernelType GetKernelTypeForVar( const std::string& var_name, const Tensor& tensor, const framework::OpKernelType& expected_kernel_type) const override; }; class ConvOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override; protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override; framework::OpKernelType GetKernelTypeForVar( const std::string& var_name, const Tensor& tensor, const framework::OpKernelType& expected_kernel_type) const override; }; // TODO: add conv double grad template <typename DeviceContext, typename T> class GemmConvKernel : public MpcOpKernel<T> { public: void ComputeImpl(const framework::ExecutionContext& context) const override { const Tensor* input = context.Input<Tensor>("Input"); // The filter will be reshaped in the calculations, // so here use an assignment operation, // that avoids modifying the variable in the Scope. 
Tensor filter = *context.Input<Tensor>("Filter"); Tensor* output = context.Output<Tensor>("Output"); output->mutable_data<T>(context.GetPlace()); const int groups = context.Attr<int>("groups"); const std::vector<int> strides = context.Attr<std::vector<int>>("strides"); std::vector<int> paddings = context.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = context.Attr<std::vector<int>>("dilations"); const std::string padding_algorithm = context.Attr<std::string>("padding_algorithm"); const std::string data_format = context.Attr<std::string>("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); Tensor transformed_input(input->type()); Tensor transformed_output(output->type()); if (channel_last) { ResizeToChannelFirst<DeviceContext, T>(context, input, &transformed_input); TransToChannelFirst<DeviceContext, T>(context, input, &transformed_input); ResizeToChannelFirst<DeviceContext, T>(context, output, &transformed_output, true); } else { ResizeToShareLast<DeviceContext, T>(context, input, &transformed_input); TransToShareLast<DeviceContext, T>(context, input, &transformed_input); transformed_output = *output; } // update padding and dilation auto trans_in_dims = transformed_input.dims(); auto filter_dims = filter.dims(); // extra 1 for share dim framework::DDim in_data_dims = framework::slice_ddim(trans_in_dims, 2 + 1, trans_in_dims.size()); framework::DDim filter_data_dims = framework::slice_ddim(filter_dims, 2 + 1, filter_dims.size()); std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); auto& dev_ctx = context.template device_context<DeviceContext>(); const int batch_size = static_cast<int>(transformed_input.dims()[0]); // filter_shape_vec: // {k_share, k_o, k_i, k_h, k_w} or {k_share, k_o, k_i, k_d, k_h, k_w} std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); // output_shape_vec: // {o_n, o_c, o_share, o_h, o_w} or {o_n, o_c, o_share, o_d, o_h, o_w} std::vector<int64_t> output_shape_vec( framework::vectorize(transformed_output.dims())); // use col_shape in the im2col calculation // col_shape_vec: // {i_s, i_c/g, k_h, k_w, o_h, o_w} or {i_s, i_c/g, k_d, k_h, k_w, // o_d, o_h, o_w} size_t data_dim = filter_shape_vec.size() - 2 - 1; std::vector<int64_t> col_shape_vec(2 + 2 * data_dim); col_shape_vec[0] = trans_in_dims[2]; col_shape_vec[1] = trans_in_dims[1] / groups; std::vector<int64_t> col_matrix_shape_vec(3); col_matrix_shape_vec[0] = col_shape_vec[0]; col_matrix_shape_vec[1] = col_shape_vec[1]; col_matrix_shape_vec[2] = 1; // use col_matrix_shape in the gemm calculation // size: // (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d * o_h * // o_w) for (size_t j = 0; j < data_dim; ++j) { col_shape_vec[j + 2] = filter_shape_vec[j + 3]; col_shape_vec[j + 2 + data_dim] = output_shape_vec[j + 3]; col_matrix_shape_vec[1] *= filter_shape_vec[j + 3]; col_matrix_shape_vec[2] *= output_shape_vec[j + 3]; } framework::DDim col_shape(framework::make_ddim(col_shape_vec)); framework::DDim col_matrix_shape(framework::make_ddim(col_matrix_shape_vec)); bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations); Tensor col; // col_matrix shares the same piece of data with col, // but will be reshaped into a two-dimensional matrix shape // to call the matrix multiplication interface. 
Tensor col_matrix; if (is_expand) { col = context.AllocateTmpTensor<T, DeviceContext>(col_shape, dev_ctx); col_matrix.ShareDataWith(col); col_matrix.Resize(col_matrix_shape); } // with share dim framework::DDim in_matrix_shape = framework::slice_ddim( transformed_input.dims(), 1, transformed_input.dims().size()); // SOIHW or SOIDHW framework::DDim filter_matrix_shape = {filter.dims()[0], filter.dims()[1], filter.numel() / (filter.dims()[0] * filter.dims()[1]) }; filter.Resize(filter_matrix_shape); int in_step = static_cast<int>(transformed_input.dims()[1]) / groups; int out_step = static_cast<int>(transformed_output.dims()[2]) / groups; // S, N*groups, C/groups, H*W or D*H*W framework::DDim output_matrix_shape = { transformed_output.dims()[0], batch_size * groups, out_step, transformed_output.numel() / (transformed_output.dims()[0] * transformed_output.dims()[1] * transformed_output.dims()[2])}; // convolution operator: im2col(or vol2col) + gemm math::Vol2ColFunctor<DeviceContext, T> vol2col; math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col; Tensor batched_col; Tensor batched_filter; batched_col.mutable_data<T>(framework::make_ddim({batch_size * groups, col_matrix_shape[0], col_matrix_shape[1], col_matrix_shape[2]}), context.GetPlace(), 0); batched_filter.mutable_data<T>(framework::make_ddim({batch_size, filter_matrix_shape[0], groups, out_step, filter_matrix_shape[2], }), context.GetPlace(), 0); auto original_out_dims = transformed_output.dims(); transformed_output.Resize(output_matrix_shape); transformed_output.mutable_data<T>(context.GetPlace()); auto copy_functor = CopyData<DeviceContext, T>(); for (int i = 0; i < batch_size; i++) { Tensor in_batch = transformed_input.Slice(i, i + 1).Resize(in_matrix_shape); Tensor filter_slice = batched_filter.Slice(i, i + 1); copy_functor(filter_slice.data<T>(), filter.data<T>(), filter.numel()); for (int g = 0; g < groups; g++) { Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); Tensor in_slice_ = SwapedLeadingDims<DeviceContext, T>(context, &in_slice); if (!is_expand) { col.ShareDataWith(in_slice_); col_matrix.ShareDataWith(col); col_matrix.Resize(col_matrix_shape); } else if (data_dim == 2U) { SharesToCols<DeviceContext, T>(context, &in_slice_, dilations, strides, std::vector<int>{paddings[0], paddings[2], paddings[1], paddings[3]}, &col, im2col); } else if (data_dim == 3U) { SharesToCols<DeviceContext, T>(context, &in_slice_, dilations, strides, paddings, &col, vol2col); } size_t col_matrix_size = col_matrix.numel(); size_t col_group_size = col_matrix_size * groups; copy_functor(batched_col.template data<T>() + i * col_group_size + g * col_matrix_size, col_matrix.template data<T>(), col_matrix_size); } } Tensor batched_col_ = SwapedLeadingDims<DeviceContext, T>(context, &batched_col); Tensor batched_fil_ = SwapedLeadingDims<DeviceContext, T>(context, &batched_filter); batched_fil_.Resize(framework::make_ddim({filter_matrix_shape[0], batch_size * groups, out_step, filter_matrix_shape[2], })); // TransToShareFirst<DeviceContext, T>(context, &batched_filter, &batched_fil_); mpc::MpcInstance::mpc_instance()->mpc_protocol()->mpc_operators()->matmul( &batched_fil_, &batched_col_, &transformed_output); transformed_output.Resize(original_out_dims); if (channel_last) { TransToChannelLast<DeviceContext, T>(context, &transformed_output, output); } } }; template <typename DeviceContext, typename T> class GemmConvGradKernel : public MpcOpKernel<T> { public: void ComputeImpl(const framework::ExecutionContext& context) const 
override { const Tensor* input = context.Input<Tensor>("Input"); const Tensor* output_grad = context.Input<Tensor>(framework::GradVarName("Output")); Tensor* input_grad = context.Output<Tensor>(framework::GradVarName("Input")); Tensor* filter_grad = context.Output<Tensor>(framework::GradVarName("Filter")); // The filter and filter_grad will be reshaped in the calculations, // so here use an assignment operation, // that avoids modifying the variable in the Scope. Tensor filter = *context.Input<Tensor>("Filter"); if (!input_grad && !filter_grad) return; int groups = context.Attr<int>("groups"); const std::vector<int> strides = context.Attr<std::vector<int>>("strides"); std::vector<int> paddings = context.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = context.Attr<std::vector<int>>("dilations"); const std::string padding_algorithm = context.Attr<std::string>("padding_algorithm"); const std::string data_format = context.Attr<std::string>("data_format"); const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); Tensor transformed_input(input->type()); Tensor transformed_output_grad(output_grad->type()); if (channel_last) { ResizeToChannelFirst<DeviceContext, T>(context, input, &transformed_input); TransToChannelFirst<DeviceContext, T>(context, input, &transformed_input); ResizeToChannelFirst<DeviceContext, T>(context, output_grad, &transformed_output_grad, true); TransToChannelFirst<DeviceContext, T>(context, output_grad, &transformed_output_grad, true); } else { ResizeToShareLast<DeviceContext, T>(context, input, &transformed_input); TransToShareLast<DeviceContext, T>(context, input, &transformed_input); transformed_output_grad = *output_grad; } // update padding and dilation auto in_dims = transformed_input.dims(); auto filter_dims = filter.dims(); // extra 1 for share dim framework::DDim in_data_dims = framework::slice_ddim(in_dims, 2 + 1, in_dims.size()); framework::DDim filter_data_dims = framework::slice_ddim(filter_dims, 2 + 1, filter_dims.size()); std::vector<int> ksize = framework::vectorize<int>(filter_data_dims); UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); const int batch_size = static_cast<int>(transformed_input.dims()[0]); auto& dev_ctx = context.template device_context<DeviceContext>(); // filter_shape_vec: {k_share, k_o, k_i, k_h, k_w} or {k_share, k_o, k_i, k_d, k_h, k_w} std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); // output_shape_vec: {o_n, o_c, o_share, o_h, o_w} or {o_n, o_c, o_share, o_d, o_h, o_w} std::vector<int64_t> output_shape_vec( framework::vectorize(transformed_output_grad.dims())); // use col_shape in the im2col calculation // col_shape_vec: {i_c/g, k_h, k_w, o_h, o_w} or {i_c/g, k_d, k_h, k_w, o_d, // o_h, o_w} size_t data_dim = filter_shape_vec.size() - 2 - 1; std::vector<int64_t> col_shape_vec(2 + 2 * data_dim); col_shape_vec[0] = in_dims[2]; col_shape_vec[1] = in_dims[1] / groups; std::vector<int64_t> col_matrix_shape_vec(3); col_matrix_shape_vec[0] = col_shape_vec[0]; col_matrix_shape_vec[1] = col_shape_vec[1]; col_matrix_shape_vec[2] = 1; // use col_matrix_shape in the gemm calculation // size: // (i_c/g * k_h * k_w, o_h * o_w) or (i_c/g * k_d * k_h * k_w, o_d * o_h * // o_w) for (size_t j = 0; j < data_dim; ++j) { col_shape_vec[j + 2] = filter_shape_vec[j + 3]; col_shape_vec[j + 2 + data_dim] = output_shape_vec[j + 3]; col_matrix_shape_vec[1] *= filter_shape_vec[j + 3]; col_matrix_shape_vec[2] *= output_shape_vec[j + 3]; } 
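// Worked example (illustrative values) of the shape computation above: with
// share dim i_s = 2, i_c/g = 4, a 3x3 kernel, and an 8x8 output map, the loop
// yields
//   col_shape_vec        = {2, 4, 3, 3, 8, 8}
//   col_matrix_shape_vec = {2, 4 * 3 * 3, 8 * 8} = {2, 36, 64},
// i.e. each share holds an (i_c/g * k_h * k_w) x (o_h * o_w) im2col matrix.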
framework::DDim col_shape(framework::make_ddim(col_shape_vec)); framework::DDim col_matrix_shape(framework::make_ddim(col_matrix_shape_vec)); // with share dim framework::DDim input_shape = framework::slice_ddim( transformed_input.dims(), 1, transformed_input.dims().size()); // SOIHW or SOIDHW framework::DDim filter_matrix_shape = {filter.dims()[0], filter.dims()[1], filter.numel() / (filter.dims()[0] * filter.dims()[1]) }; filter.Resize(filter_matrix_shape); // convolution backward input operator: gemm + col2im(or col2vol) // convolution backward weight operator: im2col(or vol2col) + gemm int in_step = static_cast<int>(transformed_input.dims()[1]) / groups; int out_step = static_cast<int>(transformed_output_grad.dims()[2]) / groups; bool is_expand = IsExpand(filter_shape_vec, strides, paddings, dilations); Tensor col; // col_matrix shares the same piece of data with col, // but will be reshaped into a two-dimensional matrix shape // to call the matrix multiplication interface. Tensor col_matrix; if (is_expand) { col = context.AllocateTmpTensor<T, DeviceContext>(col_shape, dev_ctx); col_matrix.ShareDataWith(col); col_matrix.Resize(col_matrix_shape); } math::SetConstant<DeviceContext, T> set_zero; Tensor batched_filter; batched_filter.mutable_data<T>(framework::make_ddim({batch_size, filter_matrix_shape[0], groups, out_step, filter_matrix_shape[2], }), context.GetPlace(), 0); auto copy_functor = CopyData<DeviceContext, T>(); // #pragma omp for for (int i = 0; i < batch_size; i++) { Tensor filter_slice = batched_filter.Slice(i, i + 1); copy_functor(filter_slice.data<T>(), filter.data<T>(), filter.numel()); } Tensor batched_fil_ = SwapedLeadingDims<DeviceContext, T>(context, &batched_filter); batched_fil_.Resize(framework::make_ddim({filter_matrix_shape[0], batch_size * groups, out_step, filter_matrix_shape[2], })); transformed_output_grad.Resize(framework::make_ddim({ transformed_output_grad.dims()[0], batch_size * groups, out_step, transformed_output_grad.numel() / ( transformed_output_grad.dims()[0] * transformed_output_grad.dims()[1] * transformed_output_grad.dims()[2]) })); if (input_grad) { input_grad->mutable_data<T>(context.GetPlace()); Tensor transformed_input_grad(input_grad->type()); if (channel_last) { ResizeToChannelFirst<DeviceContext, T>(context, input_grad, &transformed_input_grad); } else { ResizeToShareLast<DeviceContext, T>(context, input_grad, &transformed_input_grad); } // if is_expand is false, the operation of set_zero is unnecessary, // because math::matmul will reset input_grad. 
if (is_expand) { set_zero(dev_ctx, &transformed_input_grad, static_cast<T>(0)); } Tensor batched_gemm(input_grad->type()); batched_gemm.Resize(framework::make_ddim({col_matrix_shape[0], batch_size * groups, col_matrix_shape[1], col_matrix_shape[2], })); batched_gemm.mutable_data<T>(context.GetPlace()); mpc::MpcInstance::mpc_instance()->mpc_protocol()->mpc_operators()->matmul( &batched_fil_, &transformed_output_grad, &batched_gemm, true, false); batched_gemm = SwapedLeadingDims<DeviceContext, T>( context, &batched_gemm); batched_gemm.Resize(framework::make_ddim({batch_size, groups, col_matrix_shape[0], col_matrix_shape[1], col_matrix_shape[2], })); math::Col2VolFunctor<DeviceContext, T> col2vol; math::Col2ImFunctor<math::ColFormat::kCFO, DeviceContext, T> col2im; for (int i = 0; i < batch_size; i++) { Tensor in_grad_batch = transformed_input_grad.Slice(i, i + 1).Resize(input_shape); Tensor gemm_group = batched_gemm.Slice(i, i + 1).Resize(framework::make_ddim( {groups, col_matrix_shape[0], col_matrix_shape[1], col_matrix_shape[2], })); for (int g = 0; g < groups; g++) { Tensor in_grad_slice = in_grad_batch.Slice(g * in_step, (g + 1) * in_step); Tensor gemm = gemm_group.Slice(g, g + 1).Resize(framework::make_ddim( { col_matrix_shape[0], col_matrix_shape[1], col_matrix_shape[2], })); Tensor im_ = SwapedLeadingDims<DeviceContext, T>(context, &in_grad_slice); if (!is_expand) { gemm.Resize(im_.dims()); } else { gemm.Resize(col.dims()); } if (is_expand && data_dim == 2U) { SharesToCols<DeviceContext, T>(context, &gemm, dilations, strides, std::vector<int>{paddings[0], paddings[2], paddings[1], paddings[3]}, &im_, col2im); } else if (is_expand && data_dim == 3U) { SharesToCols<DeviceContext, T>(context, &gemm, dilations, strides, paddings, &im_, col2vol); } TransToSwapedLeadingDims<DeviceContext, T>(context, is_expand ? 
&im_ : &gemm, &in_grad_slice); } } if (channel_last) { TransToChannelLast<DeviceContext, T>(context, &transformed_input_grad, input_grad); } else { TransToShareFirst<DeviceContext, T>(context, &transformed_input_grad, input_grad); } } if (filter_grad) { filter_grad->mutable_data<T>(context.GetPlace()); auto filter_grad_dims = filter_grad->dims(); math::Im2ColFunctor<math::ColFormat::kCFO, DeviceContext, T> im2col; math::Vol2ColFunctor<DeviceContext, T> vol2col; Tensor batched_col; batched_col.mutable_data<T>(framework::make_ddim({batch_size * groups, col_matrix_shape[0], col_matrix_shape[1], col_matrix_shape[2]}), context.GetPlace(), 0); for (int i = 0; i < batch_size; i++) { Tensor in_batch = transformed_input.Slice(i, i + 1).Resize(input_shape); for (int g = 0; g < groups; g++) { Tensor in_slice = in_batch.Slice(g * in_step, (g + 1) * in_step); Tensor in_slice_ = SwapedLeadingDims<DeviceContext, T>(context, &in_slice); if (!is_expand) { col.ShareDataWith(in_slice_); col_matrix.ShareDataWith(col); col_matrix.Resize(col_matrix_shape); } else if (data_dim == 2U) { SharesToCols<DeviceContext, T>(context, &in_slice_, dilations, strides, std::vector<int>{paddings[0], paddings[2], paddings[1], paddings[3]}, &col, im2col); } else if (data_dim == 3U) { SharesToCols<DeviceContext, T>(context, &in_slice_, dilations, strides, paddings, &col, vol2col); } size_t col_matrix_size = col_matrix.numel(); size_t col_group_size = col_matrix_size * groups; copy_functor(batched_col.template data<T>() + i * col_group_size + g * col_matrix_size, col_matrix.template data<T>(), col_matrix_size); } } Tensor batched_col_ = SwapedLeadingDims<DeviceContext, T>(context, &batched_col); // gemm transformed_output_grad.Resize(framework::make_ddim({ transformed_output_grad.dims()[0], batch_size, groups * out_step, transformed_output_grad.numel() / ( transformed_output_grad.dims()[0] * transformed_output_grad.dims()[1] * transformed_output_grad.dims()[2]) })); batched_col_.Resize(framework::make_ddim({col_matrix_shape[0], batch_size, groups * col_matrix_shape[1], col_matrix_shape[2]})); Tensor filter_grad_; filter_grad_.mutable_data<T>(framework::make_ddim({filter_matrix_shape[0], 1, groups * out_step, groups * filter_matrix_shape[2] }), context.GetPlace(), 0); mpc::MpcInstance::mpc_instance()->mpc_protocol()->mpc_operators()->matmul( &transformed_output_grad, &batched_col_, &filter_grad_, 0, 1, 1); filter_grad_.Resize(framework::make_ddim({filter_matrix_shape[0], groups, out_step, groups, filter_matrix_shape[2] })); filter_grad->Resize(framework::make_ddim({filter_matrix_shape[0], groups, out_step, filter_matrix_shape[2] })); using EigenTensor5 = paddle::framework::EigenTensor<T, 5>; using EigenTensor4 = paddle::framework::EigenTensor<T, 4>; auto eigen_filter_grad_ = EigenTensor5::From(filter_grad_); auto eigen_filter_grad = EigenTensor4::From(*filter_grad); eigen_filter_grad.device(*dev_ctx.eigen_device()) = eigen_filter_grad_.sum(Eigen::array<int,1>({3})); filter_grad->Resize(filter_grad_dims); } } }; } // namespace operators } // namespace paddle
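// Illustrative sketch (not part of the operators above): conceptually,
// SwapedLeadingDims swaps the first two dimensions of a row-major tensor,
// out[j][i][k] = in[i][j][k], i.e. math::Transpose with axis order {1, 0, 2}
// after flattening the trailing dims. The names below (d0, d1, rest) are
// hypothetical and chosen only to pin down the indexing.
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> SwapLeadingDimsRef(const std::vector<T>& in, std::size_t d0,
                                  std::size_t d1, std::size_t rest) {
  std::vector<T> out(in.size());
  for (std::size_t i = 0; i < d0; ++i)
    for (std::size_t j = 0; j < d1; ++j)
      for (std::size_t k = 0; k < rest; ++k)
        // leading two dims exchanged, trailing (flattened) dims untouched
        out[(j * d0 + i) * rest + k] = in[(i * d1 + j) * rest + k];
  return out;
}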
hist_util.h
/*! * Copyright 2017-2020 by Contributors * \file hist_util.h * \brief Utility for fast histogram aggregation * \author Philip Cho, Tianqi Chen */ #ifndef XGBOOST_COMMON_HIST_UTIL_H_ #define XGBOOST_COMMON_HIST_UTIL_H_ #include <xgboost/data.h> #include <xgboost/generic_parameters.h> #include <limits> #include <vector> #include <algorithm> #include <memory> #include <utility> #include <map> #include "row_set.h" #include "common.h" #include "threading_utils.h" #include "../tree/param.h" #include "./quantile.h" #include "./timer.h" #include "../include/rabit/rabit.h" namespace xgboost { namespace common { /*! * \brief A single row in global histogram index. * Directly represent the global index in the histogram entry. */ using GHistIndexRow = Span<uint32_t const>; // A CSC matrix representing histogram cuts, used in CPU quantile hist. // The cut values represent upper bounds of bins containing approximately equal numbers of elements class HistogramCuts { protected: using BinIdx = uint32_t; public: HostDeviceVector<bst_float> cut_values_; // NOLINT HostDeviceVector<uint32_t> cut_ptrs_; // NOLINT // storing minimum value in a sketch set. HostDeviceVector<float> min_vals_; // NOLINT HistogramCuts(); HistogramCuts(HistogramCuts const& that) { cut_values_.Resize(that.cut_values_.Size()); cut_ptrs_.Resize(that.cut_ptrs_.Size()); min_vals_.Resize(that.min_vals_.Size()); cut_values_.Copy(that.cut_values_); cut_ptrs_.Copy(that.cut_ptrs_); min_vals_.Copy(that.min_vals_); } HistogramCuts(HistogramCuts&& that) noexcept(true) { *this = std::forward<HistogramCuts&&>(that); } HistogramCuts& operator=(HistogramCuts const& that) { cut_values_.Resize(that.cut_values_.Size()); cut_ptrs_.Resize(that.cut_ptrs_.Size()); min_vals_.Resize(that.min_vals_.Size()); cut_values_.Copy(that.cut_values_); cut_ptrs_.Copy(that.cut_ptrs_); min_vals_.Copy(that.min_vals_); return *this; } HistogramCuts& operator=(HistogramCuts&& that) noexcept(true) { cut_ptrs_ = std::move(that.cut_ptrs_); cut_values_ = std::move(that.cut_values_); min_vals_ = std::move(that.min_vals_); return *this; } uint32_t FeatureBins(uint32_t feature) const { return cut_ptrs_.ConstHostVector().at(feature + 1) - cut_ptrs_.ConstHostVector()[feature]; } // Getters. Cuts should be of no use after building histogram indices, but currently // it's deeply linked with quantile_hist, gpu sketcher and gpu_hist. So we preserve // these for now. 
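// Illustrative layout (assumed values): with two features where feature 0 has
// cut values {0.5, 1.5, 2.5} and feature 1 has {10.0, 20.0}:
//   cut_ptrs_   = {0, 3, 5}
//   cut_values_ = {0.5, 1.5, 2.5, 10.0, 20.0}
// so FeatureBins(0) == 3 and feature 1's cuts live in [Ptrs()[1], Ptrs()[2]).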
std::vector<uint32_t> const& Ptrs() const { return cut_ptrs_.ConstHostVector(); } std::vector<float> const& Values() const { return cut_values_.ConstHostVector(); } std::vector<float> const& MinValues() const { return min_vals_.ConstHostVector(); } size_t TotalBins() const { return cut_ptrs_.ConstHostVector().back(); } // Return the index of a cut point that is strictly greater than the input // value, or the last available index if none exists BinIdx SearchBin(float value, uint32_t column_id) const { auto beg = cut_ptrs_.ConstHostVector().at(column_id); auto end = cut_ptrs_.ConstHostVector().at(column_id + 1); const auto &values = cut_values_.ConstHostVector(); auto it = std::upper_bound(values.cbegin() + beg, values.cbegin() + end, value); BinIdx idx = it - values.cbegin(); if (idx == end) { idx -= 1; } return idx; } BinIdx SearchBin(Entry const& e) const { return SearchBin(e.fvalue, e.index); } }; inline HistogramCuts SketchOnDMatrix(DMatrix *m, int32_t max_bins) { HistogramCuts out; auto const& info = m->Info(); const auto threads = omp_get_max_threads(); std::vector<std::vector<bst_row_t>> column_sizes(threads); for (auto& column : column_sizes) { column.resize(info.num_col_, 0); } std::vector<bst_row_t> reduced(info.num_col_, 0); for (auto const& page : m->GetBatches<SparsePage>()) { auto const &entries_per_column = HostSketchContainer::CalcColumnSize(page, info.num_col_, threads); for (size_t i = 0; i < entries_per_column.size(); ++i) { reduced[i] += entries_per_column[i]; } } HostSketchContainer container(reduced, max_bins, HostSketchContainer::UseGroup(info)); for (auto const &page : m->GetBatches<SparsePage>()) { container.PushRowPage(page, info); } container.MakeCuts(&out); return out; } enum BinTypeSize { kUint8BinsTypeSize = 1, kUint16BinsTypeSize = 2, kUint32BinsTypeSize = 4 }; struct Index { Index() { SetBinTypeSize(binTypeSize_); } Index(const Index& i) = delete; Index& operator=(Index i) = delete; Index(Index&& i) = delete; Index& operator=(Index&& i) = delete; uint32_t operator[](size_t i) const { if (offset_ptr_ != nullptr) { return func_(data_ptr_, i) + offset_ptr_[i%p_]; } else { return func_(data_ptr_, i); } } void SetBinTypeSize(BinTypeSize binTypeSize) { binTypeSize_ = binTypeSize; switch (binTypeSize) { case kUint8BinsTypeSize: func_ = &GetValueFromUint8; break; case kUint16BinsTypeSize: func_ = &GetValueFromUint16; break; case kUint32BinsTypeSize: func_ = &GetValueFromUint32; break; default: CHECK(binTypeSize == kUint8BinsTypeSize || binTypeSize == kUint16BinsTypeSize || binTypeSize == kUint32BinsTypeSize); } } BinTypeSize GetBinTypeSize() const { return binTypeSize_; } template<typename T> T* data() const { // NOLINT return static_cast<T*>(data_ptr_); } uint32_t* Offset() const { return offset_ptr_; } size_t OffsetSize() const { return offset_.size(); } size_t Size() const { return data_.size() / (binTypeSize_); } void Resize(const size_t nBytesData) { data_.resize(nBytesData); data_ptr_ = reinterpret_cast<void*>(data_.data()); } void ResizeOffset(const size_t nDisps) { offset_.resize(nDisps); offset_ptr_ = offset_.data(); p_ = nDisps; } std::vector<uint8_t>::const_iterator begin() const { // NOLINT return data_.begin(); } std::vector<uint8_t>::const_iterator end() const { // NOLINT return data_.end(); } private: static uint32_t GetValueFromUint8(void *t, size_t i) { return reinterpret_cast<uint8_t*>(t)[i]; } static uint32_t GetValueFromUint16(void* t, size_t i) { return reinterpret_cast<uint16_t*>(t)[i]; } static uint32_t GetValueFromUint32(void* t, size_t i) { 
return reinterpret_cast<uint32_t*>(t)[i]; } using Func = uint32_t (*)(void*, size_t); std::vector<uint8_t> data_; std::vector<uint32_t> offset_; // size of this field is equal to number of features void* data_ptr_; BinTypeSize binTypeSize_ {kUint8BinsTypeSize}; size_t p_ {1}; uint32_t* offset_ptr_ {nullptr}; Func func_; }; /*! * \brief preprocessed global index matrix, in CSR format * * Transform floating values to integer index in histogram This is a global histogram * index for CPU histogram. On GPU ellpack page is used. */ struct GHistIndexMatrix { /*! \brief row pointer to rows by element position */ std::vector<size_t> row_ptr; /*! \brief The index data */ Index index; /*! \brief hit count of each index */ std::vector<size_t> hit_count; /*! \brief The corresponding cuts */ HistogramCuts cut; DMatrix* p_fmat; size_t max_num_bins; // Create a global histogram matrix, given cut void Init(DMatrix* p_fmat, int max_num_bins); // specific method for sparse data as no posibility to reduce allocated memory template <typename BinIdxType, typename GetOffset> void SetIndexData(common::Span<BinIdxType> index_data_span, size_t batch_threads, const SparsePage &batch, size_t rbegin, size_t nbins, GetOffset get_offset) { const xgboost::Entry *data_ptr = batch.data.HostVector().data(); const std::vector<bst_row_t> &offset_vec = batch.offset.HostVector(); const size_t batch_size = batch.Size(); CHECK_LT(batch_size, offset_vec.size()); BinIdxType* index_data = index_data_span.data(); #pragma omp parallel for num_threads(batch_threads) schedule(static) for (omp_ulong i = 0; i < batch_size; ++i) { const int tid = omp_get_thread_num(); size_t ibegin = row_ptr[rbegin + i]; size_t iend = row_ptr[rbegin + i + 1]; const size_t size = offset_vec[i + 1] - offset_vec[i]; SparsePage::Inst inst = {data_ptr + offset_vec[i], size}; CHECK_EQ(ibegin + inst.size(), iend); for (bst_uint j = 0; j < inst.size(); ++j) { uint32_t idx = cut.SearchBin(inst[j]); index_data[ibegin + j] = get_offset(idx, j); ++hit_count_tloc_[tid * nbins + idx]; } } } void ResizeIndex(const size_t rbegin, const SparsePage& batch, const size_t n_offsets, const size_t n_index, const bool isDense); inline void GetFeatureCounts(size_t* counts) const { auto nfeature = cut.Ptrs().size() - 1; for (unsigned fid = 0; fid < nfeature; ++fid) { auto ibegin = cut.Ptrs()[fid]; auto iend = cut.Ptrs()[fid + 1]; for (auto i = ibegin; i < iend; ++i) { counts[fid] += hit_count[i]; } } } inline bool IsDense() const { return isDense_; } private: std::vector<size_t> hit_count_tloc_; bool isDense_; }; template <typename GradientIndex> int32_t XGBOOST_HOST_DEV_INLINE BinarySearchBin(bst_uint begin, bst_uint end, GradientIndex const &data, uint32_t const fidx_begin, uint32_t const fidx_end) { uint32_t previous_middle = std::numeric_limits<uint32_t>::max(); while (end != begin) { auto middle = begin + (end - begin) / 2; if (middle == previous_middle) { break; } previous_middle = middle; auto gidx = data[middle]; if (gidx >= fidx_begin && gidx < fidx_end) { return static_cast<int32_t>(gidx); } else if (gidx < fidx_begin) { begin = middle; } else { end = middle; } } // Value is missing return -1; } struct GHistIndexBlock { const size_t* row_ptr; const uint32_t* index; inline GHistIndexBlock(const size_t* row_ptr, const uint32_t* index) : row_ptr(row_ptr), index(index) {} // get i-th row inline GHistIndexRow operator[](size_t i) const { return {&index[0] + row_ptr[i], row_ptr[i + 1] - row_ptr[i]}; } }; class ColumnMatrix; class GHistIndexBlockMatrix { public: void Init(const 
GHistIndexMatrix& gmat, const ColumnMatrix& colmat, const tree::TrainParam& param); inline GHistIndexBlock operator[](size_t i) const { return {blocks_[i].row_ptr_begin, blocks_[i].index_begin}; } inline size_t GetNumBlock() const { return blocks_.size(); } private: std::vector<size_t> row_ptr_; std::vector<uint32_t> index_; const HistogramCuts* cut_; struct Block { const size_t* row_ptr_begin; const size_t* row_ptr_end; const uint32_t* index_begin; const uint32_t* index_end; }; std::vector<Block> blocks_; }; template<typename GradientSumT> using GHistRow = Span<xgboost::detail::GradientPairInternal<GradientSumT> >; /*! * \brief fill a histogram by zeros */ template<typename GradientSumT> void InitilizeHistByZeroes(GHistRow<GradientSumT> hist, size_t begin, size_t end); /*! * \brief Increment hist as dst += add in range [begin, end) */ template<typename GradientSumT> void IncrementHist(GHistRow<GradientSumT> dst, const GHistRow<GradientSumT> add, size_t begin, size_t end); /*! * \brief Copy hist from src to dst in range [begin, end) */ template<typename GradientSumT> void CopyHist(GHistRow<GradientSumT> dst, const GHistRow<GradientSumT> src, size_t begin, size_t end); /*! * \brief Compute Subtraction: dst = src1 - src2 in range [begin, end) */ template<typename GradientSumT> void SubtractionHist(GHistRow<GradientSumT> dst, const GHistRow<GradientSumT> src1, const GHistRow<GradientSumT> src2, size_t begin, size_t end); /*! * \brief histogram of gradient statistics for multiple nodes */ template<typename GradientSumT> class HistCollection { public: using GHistRowT = GHistRow<GradientSumT>; using GradientPairT = xgboost::detail::GradientPairInternal<GradientSumT>; // access histogram for i-th node GHistRowT operator[](bst_uint nid) const { constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max(); CHECK_NE(row_ptr_[nid], kMax); GradientPairT* ptr = const_cast<GradientPairT*>(dmlc::BeginPtr(data_) + row_ptr_[nid]); return {ptr, nbins_}; } // have we computed a histogram for i-th node? bool RowExists(bst_uint nid) const { const uint32_t k_max = std::numeric_limits<uint32_t>::max(); return (nid < row_ptr_.size() && row_ptr_[nid] != k_max); } // initialize histogram collection void Init(uint32_t nbins) { if (nbins_ != nbins) { nbins_ = nbins; // quite expensive operation, so let's do this only once data_.clear(); } row_ptr_.clear(); n_nodes_added_ = 0; } // create an empty histogram for i-th node void AddHistRow(bst_uint nid) { constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max(); if (nid >= row_ptr_.size()) { row_ptr_.resize(nid + 1, kMax); } CHECK_EQ(row_ptr_[nid], kMax); if (data_.size() < nbins_ * (nid + 1)) { data_.resize(nbins_ * (nid + 1)); } row_ptr_[nid] = nbins_ * n_nodes_added_; n_nodes_added_++; } private: /*! \brief number of all bins over all features */ uint32_t nbins_ = 0; /*! \brief amount of active nodes in hist collection */ uint32_t n_nodes_added_ = 0; std::vector<GradientPairT> data_; /*! \brief row_ptr_[nid] locates bin for histogram of node nid */ std::vector<size_t> row_ptr_; }; /*! 
* \brief Stores temporary histograms to compute them in parallel * Supports processing multiple tree-nodes for nested parallelism * Able to reduce histograms across threads in efficient way */ template<typename GradientSumT> class ParallelGHistBuilder { public: using GHistRowT = GHistRow<GradientSumT>; void Init(size_t nbins) { if (nbins != nbins_) { hist_buffer_.Init(nbins); nbins_ = nbins; } } // Add new elements if needed, mark all hists as unused // targeted_hists - already allocated hists which should contain final results after Reduce() call void Reset(size_t nthreads, size_t nodes, const BlockedSpace2d& space, const std::vector<GHistRowT>& targeted_hists) { hist_buffer_.Init(nbins_); tid_nid_to_hist_.clear(); hist_memory_.clear(); threads_to_nids_map_.clear(); targeted_hists_ = targeted_hists; CHECK_EQ(nodes, targeted_hists.size()); nodes_ = nodes; nthreads_ = nthreads; MatchThreadsToNodes(space); AllocateAdditionalHistograms(); MatchNodeNidPairToHist(); hist_was_used_.resize(nthreads * nodes_); std::fill(hist_was_used_.begin(), hist_was_used_.end(), static_cast<int>(false)); } // Get specified hist, initialize hist by zeros if it wasn't used before GHistRowT GetInitializedHist(size_t tid, size_t nid) { CHECK_LT(nid, nodes_); CHECK_LT(tid, nthreads_); size_t idx = tid_nid_to_hist_.at({tid, nid}); GHistRowT hist = hist_memory_[idx]; if (!hist_was_used_[tid * nodes_ + nid]) { InitilizeHistByZeroes(hist, 0, hist.size()); hist_was_used_[tid * nodes_ + nid] = static_cast<int>(true); } return hist; } // Reduce following bins (begin, end] for nid-node in dst across threads void ReduceHist(size_t nid, size_t begin, size_t end) { CHECK_GT(end, begin); CHECK_LT(nid, nodes_); GHistRowT dst = targeted_hists_[nid]; bool is_updated = false; for (size_t tid = 0; tid < nthreads_; ++tid) { if (hist_was_used_[tid * nodes_ + nid]) { is_updated = true; const size_t idx = tid_nid_to_hist_.at({tid, nid}); GHistRowT src = hist_memory_[idx]; if (dst.data() != src.data()) { IncrementHist(dst, src, begin, end); } } } if (!is_updated) { // In distributed mode - some tree nodes can be empty on local machines, // So we need just set local hist by zeros in this case InitilizeHistByZeroes(dst, begin, end); } } protected: void MatchThreadsToNodes(const BlockedSpace2d& space) { const size_t space_size = space.Size(); const size_t chunck_size = space_size / nthreads_ + !!(space_size % nthreads_); threads_to_nids_map_.resize(nthreads_ * nodes_, false); for (size_t tid = 0; tid < nthreads_; ++tid) { size_t begin = chunck_size * tid; size_t end = std::min(begin + chunck_size, space_size); if (begin < space_size) { size_t nid_begin = space.GetFirstDimension(begin); size_t nid_end = space.GetFirstDimension(end-1); for (size_t nid = nid_begin; nid <= nid_end; ++nid) { // true - means thread 'tid' will work to compute partial hist for node 'nid' threads_to_nids_map_[tid * nodes_ + nid] = true; } } } } void AllocateAdditionalHistograms() { size_t hist_allocated_additionally = 0; for (size_t nid = 0; nid < nodes_; ++nid) { int nthreads_for_nid = 0; for (size_t tid = 0; tid < nthreads_; ++tid) { if (threads_to_nids_map_[tid * nodes_ + nid]) { nthreads_for_nid++; } } // In distributed mode - some tree nodes can be empty on local machines, // set nthreads_for_nid to 0 in this case. // In another case - allocate additional (nthreads_for_nid - 1) histograms, // because one is already allocated externally (will store final result for the node). 
hist_allocated_additionally += std::max<int>(0, nthreads_for_nid - 1); } for (size_t i = 0; i < hist_allocated_additionally; ++i) { hist_buffer_.AddHistRow(i); } } void MatchNodeNidPairToHist() { size_t hist_total = 0; size_t hist_allocated_additionally = 0; for (size_t nid = 0; nid < nodes_; ++nid) { bool first_hist = true; for (size_t tid = 0; tid < nthreads_; ++tid) { if (threads_to_nids_map_[tid * nodes_ + nid]) { if (first_hist) { hist_memory_.push_back(targeted_hists_[nid]); first_hist = false; } else { hist_memory_.push_back(hist_buffer_[hist_allocated_additionally]); hist_allocated_additionally++; } // map pair {tid, nid} to index of allocated histogram from hist_memory_ tid_nid_to_hist_[{tid, nid}] = hist_total++; CHECK_EQ(hist_total, hist_memory_.size()); } } } } /*! \brief number of bins in each histogram */ size_t nbins_ = 0; /*! \brief number of threads for parallel computation */ size_t nthreads_ = 0; /*! \brief number of nodes which will be processed in parallel */ size_t nodes_ = 0; /*! \brief Buffer for additional histograms for Parallel processing */ HistCollection<GradientSumT> hist_buffer_; /*! * \brief Marks which hists were used, it means that they should be merged. * Contains only {true or false} values * but 'int' is used instead of 'bool', because std::vector<bool> isn't thread safe */ std::vector<int> hist_was_used_; /*! \brief Buffer for additional histograms for Parallel processing */ std::vector<bool> threads_to_nids_map_; /*! \brief Contains histograms for final results */ std::vector<GHistRowT> targeted_hists_; /*! \brief Allocated memory for histograms used for construction */ std::vector<GHistRowT> hist_memory_; /*! \brief map pair {tid, nid} to index of allocated histogram from hist_memory_ */ std::map<std::pair<size_t, size_t>, size_t> tid_nid_to_hist_; }; /*! * \brief builder for histograms of gradient statistics */ template<typename GradientSumT> class GHistBuilder { public: using GHistRowT = GHistRow<GradientSumT>; GHistBuilder() = default; GHistBuilder(size_t nthread, uint32_t nbins) : nthread_{nthread}, nbins_{nbins} {} // construct a histogram via histogram aggregation void BuildHist(const std::vector<GradientPair>& gpair, const RowSetCollection::Elem row_indices, const GHistIndexMatrix& gmat, GHistRowT hist, bool isDense); // same, with feature grouping void BuildBlockHist(const std::vector<GradientPair>& gpair, const RowSetCollection::Elem row_indices, const GHistIndexBlockMatrix& gmatb, GHistRowT hist); // construct a histogram via subtraction trick void SubtractionTrick(GHistRowT self, GHistRowT sibling, GHistRowT parent); uint32_t GetNumBins() const { return nbins_; } private: /*! \brief number of threads for parallel computation */ size_t nthread_ { 0 }; /*! \brief number of all bins over all features */ uint32_t nbins_ { 0 }; }; } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_HIST_UTIL_H_
GB_unaryop__minv_bool_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_int32 // op(A') function: GB_tran__minv_bool_int32 // C type: bool // A type: int32_t // cast: ; // unaryop: cij = true #define GB_ATYPE \ int32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_int32 ( bool *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
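//------------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file): with the macros above
// expanded, the loop body of GB_unop__minv_bool_int32 reduces to the code
// below. MINV on bool is the constant true in GraphBLAS (both 1/0 and 1/1 are
// treated as 1), so the int32 input values are never read.
//------------------------------------------------------------------------------

#include <stdbool.h>
#include <stdint.h>

static void unop_minv_bool_int32_ref (bool *Cx, const int32_t *Ax, int64_t anz)
{
    (void) Ax ;     // input values are ignored: z = true for every entry
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = true ;
    }
}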
batchnorm_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: bhu@openailab.com * Update: hhchen@openailab.com */ #include <stdbool.h> #include <math.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" #include "batchnorm_param.h" struct ref_batchnorm_param { int input_n; int input_h; int input_w; int input_c; int layout; bool iscaffe; float* scale_mean; float* scale_var_inv; float* gamma; float* beta; float in_scale; int in_zero; float out_scale; int out_zero; }; static int ref_batchnorm_uint8(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, const struct ref_batchnorm_param* param, int num_thread) { float* scale_mean = param->scale_mean; float* scale_var_inv = param->scale_var_inv; float* gamma = param->gamma; float* beta = param->beta; int img_size = param->input_c * param->input_h * param->input_w; int total_size = img_size * param->input_n; // dequant uint8_t* input_uint8 = input_tensor->data; uint8_t* output_uint8 = output_tensor->data; float input_scale = input_tensor->scale; float output_scale = output_tensor->scale; int32_t input_zero = input_tensor->zero_point; int32_t output_zero = output_tensor->zero_point; float* data_fp32 = (float*) sys_malloc(total_size * sizeof(float)); for(int i = 0; i < total_size; i++) data_fp32[i] = ((float) input_uint8[i] - (float)input_zero) * input_scale; for (int n = 0; n < param->input_n; ++n) { #pragma omp parallel for num_threads(num_thread) for (int h = 0; h < param->input_h; ++h) { for (int w = 0; w < param->input_w; ++w) { for (int c = 0; c < param->input_c; ++c) { float s_mean = scale_mean[c]; float s_var = scale_var_inv[c]; float s_val1 = s_mean; float s_val2 = s_var; if (!param->iscaffe) { float s_gamma = gamma[c]; float s_beta = beta[c]; s_val1 = s_beta + s_gamma * s_mean; s_val2 = s_gamma * s_var; } int offset = 0; if (TENGINE_LAYOUT_NCHW == param->layout) { offset = n * img_size + c * param->input_h * param->input_w + h * param->input_w + w; } else { offset = n * img_size + h * param->input_w * param->input_c + w * param->input_c + c; } data_fp32[offset] = data_fp32[offset] * s_val2 + s_val1; } } } } // quant for(int i=0; i<total_size; i++) { int udata = round(data_fp32[i] / output_scale + output_zero); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[i] = udata; } return 0; } static int ref_batchnorm_fp32(float* input, float* output, const struct ref_batchnorm_param* param, int num_thread) { float* scale_mean = param->scale_mean; float* scale_var_inv = param->scale_var_inv; float* gamma = param->gamma; float* beta = param->beta; int img_size = param->input_c * param->input_h * param->input_w; for (int n = 0; n < 
param->input_n; ++n) { #pragma omp parallel for num_threads(num_thread) for (int h = 0; h < param->input_h; ++h) { for (int w = 0; w < param->input_w; ++w) { for (int c = 0; c < param->input_c; ++c) { float s_mean = scale_mean[c]; float s_var = scale_var_inv[c]; float s_val1 = s_mean; float s_val2 = s_var; if (!param->iscaffe) { float s_gamma = gamma[c]; float s_beta = beta[c]; s_val1 = s_beta + s_gamma * s_mean; s_val2 = s_gamma * s_var; } int offset = 0; if (TENGINE_LAYOUT_NCHW == param->layout) { offset = n * img_size + c * param->input_h * param->input_w + h * param->input_w + w; } else { offset = n * img_size + h * param->input_w * param->input_c + w * param->input_c + c; } output[offset] = input[offset] * s_val2 + s_val1; } } } } return 0; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ref_batchnorm_param* batchnorm_op_param = ( struct ref_batchnorm_param* )sys_malloc(sizeof(struct ref_batchnorm_param)); memset(batchnorm_op_param, 0, sizeof(struct ref_batchnorm_param)); exec_node->ops_priv = batchnorm_op_param; return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { sys_free(exec_node->ops_priv); return 0; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* output_tensor; const struct ir_tensor* input_tensor; int channel_num; // struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); const struct ir_tensor* mean_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[3]); const struct ir_tensor* var_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[4]); ; struct ref_batchnorm_param* op_param = ( struct ref_batchnorm_param* )exec_node->ops_priv; struct batchnorm_param* batchnorm_param = ( struct batchnorm_param* )ir_node->op.param_mem; if (ir_graph->graph_layout == TENGINE_LAYOUT_NCHW) { channel_num = input_tensor->dims[1]; } else if (ir_graph->graph_layout == TENGINE_LAYOUT_NHWC) { channel_num = input_tensor->dims[3]; } float* scale_mean = ( float* )sys_malloc(channel_num * sizeof(float)); float* scale_var_inv = ( float* )sys_malloc(channel_num * sizeof(float)); const float* mean = ( const float* )mean_tensor->data; const float* var = ( const float* )var_tensor->data; float rescale_factor; float eps = batchnorm_param->eps; rescale_factor = batchnorm_param->rescale_factor ? 
1 / batchnorm_param->rescale_factor : 0; for (int c = 0; c < channel_num; c++) { float tmp = sqrt(var[c] * rescale_factor + eps); scale_var_inv[c] = ( float )(1.f / tmp); tmp = rescale_factor * scale_var_inv[c]; scale_mean[c] = ( float )(-mean[c] * tmp); } float* gamma = NULL; float* beta = NULL; if (!batchnorm_param->caffe_flavor) { const struct ir_tensor* gamma_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]); const struct ir_tensor* beta_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]); gamma = ( float* )gamma_tensor->data; beta = ( float* )beta_tensor->data; } int layout = ir_graph->graph_layout; op_param->iscaffe = batchnorm_param->caffe_flavor; op_param->scale_mean = scale_mean; op_param->scale_var_inv = scale_var_inv; op_param->gamma = gamma; op_param->beta = beta; op_param->layout = layout; return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor; struct ir_tensor* output_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct ref_batchnorm_param* batchnorm_op_param = ( struct ref_batchnorm_param* )exec_node->ops_priv; void* out_data = output_tensor->data; void* input = input_tensor->data; if (TENGINE_LAYOUT_NCHW == ir_graph->graph_layout) { if (4 == input_tensor->dim_num) { batchnorm_op_param->input_n = input_tensor->dims[0]; batchnorm_op_param->input_c = input_tensor->dims[1]; batchnorm_op_param->input_h = input_tensor->dims[2]; batchnorm_op_param->input_w = input_tensor->dims[3]; } else if (3 == input_tensor->dim_num) { batchnorm_op_param->input_n = input_tensor->dims[0]; batchnorm_op_param->input_c = input_tensor->dims[1]; batchnorm_op_param->input_w = input_tensor->dims[2]; batchnorm_op_param->input_h = 1; } else { return false; } } else { if (4 == input_tensor->dim_num) { batchnorm_op_param->input_n = input_tensor->dims[0]; batchnorm_op_param->input_c = input_tensor->dims[3]; batchnorm_op_param->input_h = input_tensor->dims[1]; batchnorm_op_param->input_w = input_tensor->dims[2]; } else if (3 == input_tensor->dim_num) { batchnorm_op_param->input_n = input_tensor->dims[0]; batchnorm_op_param->input_c = input_tensor->dims[2]; batchnorm_op_param->input_w = input_tensor->dims[1]; batchnorm_op_param->input_h = 1; } else { return false; } } int ret = -1; if (input_tensor->data_type == TENGINE_DT_FP32) ret = ref_batchnorm_fp32(input, out_data, batchnorm_op_param, exec_graph->num_thread); else if (input_tensor->data_type == TENGINE_DT_UINT8) ret = ref_batchnorm_uint8(input_tensor, output_tensor, batchnorm_op_param, exec_graph->num_thread); return ret; } static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ref_batchnorm_param* batchnorm_op_param = ( struct ref_batchnorm_param* )exec_node->ops_priv; sys_free(batchnorm_op_param->scale_mean); sys_free(batchnorm_op_param->scale_var_inv); return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { return OPS_SCORE_CANDO; } static struct node_ops hcl_node_ops = {.prerun = prerun, .run = run, .reshape = NULL, .postrun = postrun, .init_node = init_node, .release_node = release_node, .score = score}; static int reg_batchnorm_hcl_ops(void* arg) { return register_builtin_node_ops(OP_BATCHNORM, &hcl_node_ops); } 
static int unreg_batchnorm_hcl_ops(void* arg) { return unregister_builtin_node_ops(OP_BATCHNORM, &hcl_node_ops); } AUTO_REGISTER_OPS(reg_batchnorm_hcl_ops); AUTO_UNREGISTER_OPS(unreg_batchnorm_hcl_ops);
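/*
 * Illustrative sketch (not part of the operator above): prerun()/run() fold
 * batch norm into one per-channel multiply-add, y = x * s_val2 + s_val1.
 * The reference below mirrors that folding for a single element in the
 * non-caffe flavor; the function name is hypothetical.
 */
#include <math.h>

static float batchnorm_fold_ref(float x, float mean, float var, float gamma,
                                float beta, float rescale, float eps)
{
    float var_inv = 1.0f / sqrtf(var * rescale + eps); /* scale_var_inv[c] */
    float mean_s = -mean * rescale * var_inv;          /* scale_mean[c]    */
    float s_val2 = gamma * var_inv;
    float s_val1 = beta + gamma * mean_s;
    return x * s_val2 + s_val1;
}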
mmc.c
// See the Cormen book for details of the following algorithm #include<stdio.h> #include<limits.h> #include <math.h> #include <omp.h> #define min(a,b) (((a)<(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b)) #define max(a,b) (((a)>(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #define floord(n,d) floor(((double)(n))/((double)(d))) #define ceild(n,d) ceil(((double)(n))/((double)(d))) int N = 1500, DIM = 1502; #include "mem.h" #define pluto 3 #define traco 2 #define tstile 4 int **s; int minsq(int a,int b,int i,int j,int k) { if(a < b){ s[i][j] = k; return a; } else return b; } // Matrix Ai has dimension p[i-1] x p[i] for i = 1..n int MatrixChainOrder(int p[], int n, int kind) { /* For simplicity of the program, one extra row and one extra column are allocated in m[][]. 0th row and 0th column of m[][] are not used */ int** m = mem(); s = mem(); int i, j, k, L, q; /* m[i,j] = Minimum number of scalar multiplications needed to compute the matrix A[i]A[i+1]...A[j] = A[i..j] where dimension of A[i] is p[i-1] x p[i] */ double start = omp_get_wtime(); // cost is zero when multiplying one matrix. for (i=1; i<n; i++) m[i][i] = 0; // L is chain length. if(kind==-1) for (L=2; L<n; L++) { for (i=1; i<n-L+1; i++) { j = i+L-1; m[i][j] = INT_MAX; for (k=i; k<=j-1; k++) { // q = cost/scalar multiplications q = m[i][k] + m[k+1][j] + p[i-1]*p[k]*p[j]; if (q < m[i][j]){ m[i][j] = q; s[i][j] = k; } } } } if(kind==1) for (L=2; L<n; L++) { for (i=1; i<n-L+1; i++) { m[i][i+L-1] = INT_MAX; for (k=i; k<=i+L-1-1; k++) { m[i][i+L-1] = minsq(m[i][k] + m[k+1][i+L-1] + p[i-1]*p[k]*p[i+L-1], m[i][i+L-1], i, i+L-1, k); } } } if(kind==pluto) { int t1, t2, t3, t4, t5, t6; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if (n >= 3) { lbp=0; ubp=floord(n-1,16); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6) for (t2=lbp;t2<=ubp;t2++) { for (t3=0;t3<=min(floord(n-2,16),floord(-16*t2+n,16));t3++) { for (t4=max(1,16*t3);t4<=min(min(n-2,-16*t2+n),16*t3+15);t4++) { lbv=max(2,16*t2); ubv=min(16*t2+15,-t4+n); #pragma ivdep #pragma vector always for (t5=lbv;t5<=ubv;t5++) { m[t4][t4+t5-1] = INT_MAX;; } } } } for (t1=0;t1<=floord(n-1,8);t1++) { lbp=ceild(t1,2); ubp=min(floord(n,16),t1); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t2) shared(t1) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(2,16*t1-16*t2);t3<=min(min(n-1,16*t2+14),16*t1-16*t2+15);t3++) { for (t4=max(16*t2,t3+1);t4<=min(n,16*t2+15);t4++) { for (t5=-t3+t4;t5<=t4-2;t5++) { m[(-t3+t4)][(-t3+t4)+t3-1] = minsq(m[(-t3+t4)][t5] + m[t5+1][(-t3+t4)+t3-1] + p[(-t3+t4)-1]*p[t5]*p[(-t3+t4)+t3-1], m[(-t3+t4)][(-t3+t4)+t3-1], (-t3+t4),(-t3+t4)+t3-1,t5);; } } } } } } } /* End of CLooG code */ if(kind==traco) { int t1, t2, t3, t4, t5, t6; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; lbp=0; ubp=floord(n-1,16); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6) for (t2=lbp;t2<=ubp;t2++) { for (t3=0;t3<=min(floord(n-2,16),floord(-16*t2+n,16));t3++) { for (t4=max(1,16*t3);t4<=min(min(n-2,-16*t2+n),16*t3+15);t4++) { lbv=max(2,16*t2); ubv=min(16*t2+15,-t4+n); #pragma ivdep #pragma vector always for (t5=lbv;t5<=ubv;t5++) { m[t4][t4+t5-1] = INT_MAX;; } } } } int c1,c3,c5,c9,c7,c11; for( c1 = 0; c1 < n - 2; c1 += 1) #pragma omp parallel for shared(c1) private(c3,c5,c9,c11) for( c3 = 0; c3 <= (n - c1 - 3) / 128; c3 += 1) for( c5 = 0; c5 <= c1 / 16; c5 += 1) for( c9 = 128 * c3 + 1; c9 <= min(n - c1 - 2, 128 * c3 + 128); c9 += 1) for( c11 = 16 * c5 + c9; c11 <= min(c1 + c9, 16 * c5 + c9 + 15); c11 += 1) m[c9][c9+(c1+2)-1] = minsq(m[c9][c11] + 
m[c11+1][c9+(c1+2)-1] + p[c9-1]*p[c11]*p[c9+(c1+2)-1], m[c9][c9+(c1+2)-1], c9,c9+(c1+2)-1,c11); if(1==0) for( c1 = 0; c1 <= floord(n - 3, 16); c1 += 1) #pragma omp parallel for for( c3 = 0; c3 <= -c1 + (n - 3) / 16; c3 += 1) for( c5 = 0; c5 <= c1; c5 += 1) { if (c1 >= c5 + 1) { for( c9 = 16 * c1 + 16 * c3 + 3; c9 <= min(n, 16 * c1 + 16 * c3 + 18); c9 += 1) for( c11 = -16 * c1 + 16 * c5 + c9 - 2; c11 <= -16 * c1 + 16 * c5 + c9 + 13; c11 += 1) m[(-16*c1+c9-2)][(-16*c1+c9-2)+(16*c1+2)-1] = minsq(m[(-16*c1+c9-2)][c11] + m[c11+1][(-16*c1+c9-2)+(16*c1+2)-1] + p[(-16*c1+c9-2)-1]*p[c11]*p[(-16*c1+c9-2)+(16*c1+2)-1], m[(-16*c1+c9-2)][(-16*c1+c9-2)+(16*c1+2)-1], (-16*c1+c9-2),(-16*c1+c9-2)+(16*c1+2)-1,c11); } else for( c7 = 16 * c1 + 2; c7 <= min(16 * c1 + 17, n - c3 - 1); c7 += 1) for( c9 = max(16 * c1 + 16 * c3 + 3, c7 + 1); c9 <= min(n, 16 * c1 + 16 * c3 + 18); c9 += 1) { if (c9 >= 16 * c3 + c7 + 1) { if (c7 >= 16 * c1 + 3) for( c11 = -c7 + c9; c11 < 16 * c1 - c7 + c9; c11 += 1) m[(-c7+c9)][(-c7+c9)+c7-1] = minsq(m[(-c7+c9)][c11] + m[c11+1][(-c7+c9)+c7-1] + p[(-c7+c9)-1]*p[c11]*p[(-c7+c9)+c7-1], m[(-c7+c9)][(-c7+c9)+c7-1], (-c7+c9),(-c7+c9)+c7-1,c11); for( c11 = 16 * c1 - c7 + c9; c11 < c9 - 1; c11 += 1) m[(-c7+c9)][(-c7+c9)+c7-1] = minsq(m[(-c7+c9)][c11] + m[c11+1][(-c7+c9)+c7-1] + p[(-c7+c9)-1]*p[c11]*p[(-c7+c9)+c7-1], m[(-c7+c9)][(-c7+c9)+c7-1], (-c7+c9),(-c7+c9)+c7-1,c11); } else for( c11 = -c7 + c9; c11 < c9 - 1; c11 += 1) m[(-c7+c9)][(-c7+c9)+c7-1] = minsq(m[(-c7+c9)][c11] + m[c11+1][(-c7+c9)+c7-1] + p[(-c7+c9)-1]*p[c11]*p[(-c7+c9)+c7-1], m[(-c7+c9)][(-c7+c9)+c7-1], (-c7+c9),(-c7+c9)+c7-1,c11); } } } if(kind == tstile){ int t1, t2, t3, t4, t5, t6; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; int c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c0; lbp=0; ubp=floord(n-1,16); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6) for (t2=lbp;t2<=ubp;t2++) { for (t3=0;t3<=min(floord(n-2,16),floord(-16*t2+n,16));t3++) { for (t4=max(1,16*t3);t4<=min(min(n-2,-16*t2+n),16*t3+15);t4++) { lbv=max(2,16*t2); ubv=min(16*t2+15,-t4+n); #pragma ivdep #pragma vector always for (t5=lbv;t5<=ubv;t5++) { m[t4][t4+t5-1] = INT_MAX;; } } } } for( c0 = 0; c0 <= floord(n - 3, 16); c0 += 1) #pragma omp parallel for private(c3,c1,c4,c10,c6) shared(c0) for( c1 = 0; c1 <= c0; c1 += 1) for( c3 = 16 * c0 + 16 * c1 + 5; c3 <= min(min(2 * n - 16 * c0 + 16 * c1 - 1, n + 16 * c1 + 17), 16 * c0 + 16 * c1 + 50); c3 += 1) for( c4 = max(c0 - c1, -2 * c1 + (c3 - 2) / 16 - 2); c4 <= min((n - 2) / 16, -c1 + (16 * c0 + 16 * c1 + c3 + 12) / 32); c4 += 1) for( c6 = max(max(max(16 * c1 + 2, -n + c3), -8 * c4 + c3 / 2 - 7), -8 * c0 + 8 * c1 + (c3 + 1) / 2 - 8); c6 <= min(min(16 * c1 + 17, c3 - 16 * c4 - 2), -8 * c0 + 8 * c1 + (c3 + 1) / 2 - 1); c6 += 1) for( c10 = max(16 * c4, c3 - 2 * c6); c10 <= min(16 * c4 + 15, c3 - c6 - 2); c10 += 1) m[(c3-2*c6)][(c3-2*c6)+c6-1] = minsq(m[(c3-2*c6)][c10] + m[c10+1][(c3-2*c6)+c6-1] + p[(c3-2*c6)-1]*p[c10]*p[(c3-2*c6)+c6-1], m[(c3-2*c6)][(c3-2*c6)+c6-1], (c3-2*c6), (c3-2*c6)+c6-1, c10); if(1==0) { for( c0 = 0; c0 <= floord(n - 3, 16); c0 += 1) #pragma omp parallel for for( c1 = 0; c1 <= c0; c1 += 1) { for( c3 = 16 * c1 + 2; c3 <= min(n - 16 * c0 + 16 * c1 - 1, 16 * c1 + 17); c3 += 1) for( c4 = c0 - c1; c4 <= min(c0 - c1 + 1, (n - c3) / 16); c4 += 1) { if (c0 == 0 && c1 == 0 && c4 == 0) for( c6 = 2; c6 < (c3 + 1) / 2; c6 += 1) for( c10 = c3 - 2 * c6; c10 < c3 - c6 - 1; c10 += 1) m[(c3-2*c6)][(c3-2*c6)+c6-1] = minsq(m[(c3-2*c6)][c10] + m[c10+1][(c3-2*c6)+c6-1] + p[(c3-2*c6)-1]*p[c10]*p[(c3-2*c6)+c6-1], 
m[(c3-2*c6)][(c3-2*c6)+c6-1], (c3-2*c6), (c3-2*c6)+c6, c10); for( c8 = max(16 * c0 - 16 * c1 + 1, 16 * c4); c8 <= min(min(16 * c0 - 16 * c1 + 16, n - c3), 16 * c4 + 15); c8 += 1) m[c8][c8+c3-1] = INT_MAX; } for( c3 = max(5, n); c3 <= min(17, 2 * n - 1); c3 += 1) for( c6 = max(2, -n + c3); c6 < (c3 + 1) / 2; c6 += 1) for( c10 = c3 - 2 * c6; c10 < c3 - c6 - 1; c10 += 1) m[(c3-2*c6)][(c3-2*c6)+c6-1] = minsq(m[(c3-2*c6)][c10] + m[c10+1][(c3-2*c6)+c6-1] + p[(c3-2*c6)-1]*p[c10]*p[(c3-2*c6)+c6-1], m[(c3-2*c6)][(c3-2*c6)+c6-1], (c3-2*c6), (c3-2*c6)+c6, c10); for( c3 = max(16 * c0 + 16 * c1 + 5, 16 * c1 + 18); c3 <= min(min(2 * n - 16 * c0 + 16 * c1 - 1, n + 16 * c1 + 17), 16 * c0 + 16 * c1 + 50); c3 += 1) for( c4 = max(c0 - c1, -2 * c1 + (c3 - 2) / 16 - 2); c4 <= min((n - 2) / 16, -c1 + (16 * c0 + 16 * c1 + c3 + 12) / 32); c4 += 1) for( c6 = max(max(max(16 * c1 + 2, -n + c3), -8 * c4 + c3 / 2 - 7), -8 * c0 + 8 * c1 + (c3 + 1) / 2 - 8); c6 <= min(min(16 * c1 + 17, c3 - 16 * c4 - 2), -8 * c0 + 8 * c1 + (c3 + 1) / 2 - 1); c6 += 1) for( c10 = max(16 * c4, c3 - 2 * c6); c10 <= min(16 * c4 + 15, c3 - c6 - 2); c10 += 1) m[(c3-2*c6)][(c3-2*c6)+c6-1] = minsq(m[(c3-2*c6)][c10] + m[c10+1][(c3-2*c6)+c6-1] + p[(c3-2*c6)-1]*p[c10]*p[(c3-2*c6)+c6-1], m[(c3-2*c6)][(c3-2*c6)+c6-1], (c3-2*c6), (c3-2*c6)+c6, c10); } } } double stop = omp_get_wtime(); printf("%.4f\n",stop - start); return m[1][n-1]; } int main(int argc, char *argv[]){ int num_proc=1, i; if(argc > 1) num_proc = atoi(argv[1]); omp_set_num_threads(num_proc); int kind=1; if(argc > 2) N = atoi(argv[2]); DIM = N+2; if(argc > 3) kind = atoi(argv[3]); int *p = (int *) malloc(DIM * sizeof(int)); for(i=0; i<N; i++) p[i] = (i % 20) + 1; //int p[] = {1, 2, 3, 4}; //N = sizeof(p)/sizeof(p[0]); printf("Minimum number of multiplications is %d \n", MatrixChainOrder(p, N, kind)); return 0; }
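/*
 * Worked example (illustrative) of the recurrence all four variants compute:
 *   m[i][j] = min over i <= k < j of m[i][k] + m[k+1][j] + p[i-1]*p[k]*p[j]
 * For p = {1, 2, 3, 4} (A1: 1x2, A2: 2x3, A3: 3x4):
 *   (A1 A2) A3 costs 1*2*3 + 1*3*4 = 18, A1 (A2 A3) costs 2*3*4 + 1*2*4 = 32,
 * so m[1][3] = 18 with split s[1][3] = 2 (break between A2 and A3).
 * Usage, from main() above: ./mmc [num_threads] [N] [kind], where kind is
 * -1 or 1 (sequential baselines), 3 (pluto), 2 (traco), or 4 (tstile).
 */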
GB_binop__ldexp_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ldexp_fp32 // A.*B function (eWiseMult): GB_AemultB__ldexp_fp32 // A*D function (colscale): (none) // D*A function (rowscale): (none) // C+=B function (dense accum): GB_Cdense_accumB__ldexp_fp32 // C+=b function (dense accum): GB_Cdense_accumb__ldexp_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ldexp_fp32 // C=scalar+B GB_bind1st__ldexp_fp32 // C=scalar+B' GB_bind1st_tran__ldexp_fp32 // C=A+scalar GB_bind2nd__ldexp_fp32 // C=A'+scalar GB_bind2nd_tran__ldexp_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = ldexpf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = ldexpf (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LDEXP || GxB_NO_FP32 || GxB_NO_LDEXP_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ldexp_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ldexp_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ldexp_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__ldexp_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif }
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ldexp_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ldexp_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = ldexpf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ldexp_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = ldexpf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = ldexpf (x, aij) ; \ } GrB_Info GB_bind1st_tran__ldexp_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = ldexpf (aij, y) ; \ } GrB_Info GB_bind2nd_tran__ldexp_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
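/*
 * Illustrative sketch, not part of the generated file: LDEXP_FP32 computes
 * z = ldexpf (x, y) = x * 2^y, with the float y converted to an integer
 * exponent.  The loop below is the bind2nd kernel above with the GraphBLAS
 * plumbing stripped out; demo_bind2nd_ldexp is a name assumed for this
 * sketch.
 */
#include <math.h>
#include <stdint.h>

static void demo_bind2nd_ldexp (float *Cx, const float *Ax, float y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        Cx [p] = ldexpf (aij, (int) y) ;   /* cij = aij * 2^(int)y */
    }
}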
GB_binop__max_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_int64) // A.*B function (eWiseMult): GB (_AemultB_08__max_int64) // A.*B function (eWiseMult): GB (_AemultB_02__max_int64) // A.*B function (eWiseMult): GB (_AemultB_04__max_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int64) // A*D function (colscale): GB (_AxD__max_int64) // D*A function (rowscale): GB (_DxB__max_int64) // C+=B function (dense accum): GB (_Cdense_accumB__max_int64) // C+=b function (dense accum): GB (_Cdense_accumb__max_int64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int64) // C=scalar+B GB (_bind1st__max_int64) // C=scalar+B' GB (_bind1st_tran__max_int64) // C=A+scalar GB (_bind2nd__max_int64) // C=A'+scalar GB (_bind2nd_tran__max_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 0 // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMAX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_INT64 || GxB_NO_MAX_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__max_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
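/*
 * Illustrative sketch, not part of the generated file: GB_IMAX is the
 * integer maximum, z = (x > y) ? x : y.  Since max is commutative,
 * GB_BINOP_FLIP is 0 above and GB (_AemultB_02__max_int64) never needs a
 * flipped fmult(y,x) path.  demo_imax and demo_bind1st_max are names
 * assumed for this sketch.
 */
#include <stdint.h>

static inline int64_t demo_imax (int64_t x, int64_t y)
{
    return ((x > y) ? x : y) ;
}

static void demo_bind1st_max (int64_t *Cx, int64_t x, const int64_t *Bx, int64_t bnz)
{
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        Cx [p] = demo_imax (x, Bx [p]) ;   /* cij = max (scalar, bij) */
    }
}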
findSubGraphs.c
#include "defs.h" double findSubGraphs(graph* G, edge* maxIntWtList, int maxIntWtListSize) { mcsim_skip_instrs_begin(); VERT_T* S; LONG_T *start; char* visited; LONG_T *pSCount; #ifdef _OPENMP omp_lock_t* vLock; #endif LONG_T phase_num, numPhases; LONG_T count; double elapsed_time = get_seconds(); numPhases = SubGraphPathLength + 1; #ifdef _OPENMP omp_set_num_threads(NUM_THREADS); #pragma omp parallel { #endif VERT_T *pS, *pSt; LONG_T pCount, pS_size; LONG_T v, w, search_num; int tid, nthreads; LONG_T j, k, vert, n; #ifdef _OPENMP LONG_T i; tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); #else tid = 0; nthreads = 1; #endif n = G->n; pS_size = n/nthreads + 1; pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T)); assert(pS != NULL); if (tid == 0) { S = (VERT_T *) malloc(n*sizeof(VERT_T)); visited = (char *) calloc(n, sizeof(char)); start = (LONG_T *) calloc((numPhases+2), sizeof(LONG_T)); pSCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T)); #ifdef _OPENMP vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t)); #endif } #ifdef _OPENMP #pragma omp barrier #pragma omp for for (i=0; i<n; i++) { omp_init_lock(&vLock[i]); } #endif mcsim_skip_instrs_end(); for (search_num=0; search_num<maxIntWtListSize; search_num++) { mcsim_skip_instrs_begin(); #ifdef _OPENMP #pragma omp barrier #endif /* Run path-limited BFS in parallel */ if (tid == 0) { free(visited); visited = (char *) calloc(n, sizeof(char)); S[0] = maxIntWtList[search_num].startVertex; S[1] = maxIntWtList[search_num].endVertex; visited[S[0]] = (char) 1; visited[S[1]] = (char) 1; count = 2; phase_num = 1; start[0] = 0; start[1] = 1; start[2] = 2; } mcsim_skip_instrs_end(); #ifdef _OPENMP #pragma omp barrier #endif while (phase_num <= SubGraphPathLength) { pCount = 0; #ifdef _OPENMP #pragma omp for #endif for (vert=start[phase_num]; vert<start[phase_num+1]; vert++) { v = S[vert]; #ifdef PERSISTENT mcsim_skip_instrs_begin(); VERT_T *undolog_pS, *redolog_pS; undolog_pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T)); redolog_pS = (VERT_T *) malloc(pS_size*sizeof(VERT_T)); mcsim_skip_instrs_end(); #endif for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) { w = G->endV[j]; if (v == w) continue; #ifdef _OPENMP int myLock = omp_test_lock(&vLock[w]); if (myLock) { #endif if (visited[w] != (char) 1) { visited[w] = (char) 1; if (pCount == pS_size) { /* Resize pS */ pSt = (VERT_T *)malloc(2*pS_size*sizeof(VERT_T)); memcpy(pSt, pS, pS_size*sizeof(VERT_T)); free(pS); pS = pSt; pS_size = 2*pS_size; } #ifdef PERSISTENT mcsim_log_begin(); //mcsim_skip_instrs_begin(); #ifdef UNDOLOG undolog_pS[pCount] = pS[pCount]; #endif // UNDOLOG #ifdef REDOLOG redolog_pS[pCount] = w; #endif // REDOLOG //mcsim_skip_instrs_end(); mcsim_mem_fence(); mcsim_log_end(); mcsim_mem_fence(); #endif // PERSISTENT pS[pCount++] = w; } #ifdef _OPENMP omp_unset_lock(&vLock[w]); } #endif } // make sure undolog and redolog data structures are not discarded by compiler #ifdef PERSISTENT mcsim_skip_instrs_begin(); printf("%d\n", (int)((sizeof undolog_pS) + (sizeof redolog_pS))); mcsim_skip_instrs_end(); #endif // PERSISTENT } #ifdef _OPENMP #pragma omp barrier #endif pSCount[tid+1] = pCount; #ifdef _OPENMP #pragma omp barrier #endif if (tid == 0) { pSCount[0] = start[phase_num+1]; for(k=1; k<=nthreads; k++) { pSCount[k] = pSCount[k-1] + pSCount[k]; } start[phase_num+2] = pSCount[nthreads]; count = pSCount[nthreads]; phase_num++; } #ifdef _OPENMP #pragma omp barrier #endif #ifdef PERSISTENT mcsim_skip_instrs_begin(); VERT_T *undolog_S, *redolog_S; undolog_S = (VERT_T *) 
malloc(n*sizeof(VERT_T)); redolog_S = (VERT_T *) malloc(n*sizeof(VERT_T)); mcsim_skip_instrs_end(); #endif // PERSISTENT for (k = pSCount[tid]; k < pSCount[tid+1]; k++) { #ifdef PERSISTENT mcsim_log_begin(); //mcsim_skip_instrs_begin(); #ifdef UNDOLOG undolog_S[k] = S[k]; #endif // UNDOLOG #ifdef REDOLOG redolog_S[k] = pS[k-pSCount[tid]]; #endif // REDOLOG //mcsim_skip_instrs_end(); mcsim_mem_fence(); mcsim_log_end(); mcsim_mem_fence(); #endif // PERSISTENT S[k] = pS[k-pSCount[tid]]; } #ifdef PERSISTENT // make sure undolog and redolog data structures are not discarded by compiler mcsim_skip_instrs_begin(); printf("%d\n", (int)((sizeof undolog_S) + (sizeof redolog_S))); mcsim_skip_instrs_end(); #endif // PERSISTENT #ifdef _OPENMP #pragma omp barrier #endif } /* End of search */ mcsim_skip_instrs_begin(); if (tid == 0) { fprintf(stderr, "Search from <%ld, %ld>, number of vertices visited:" " %ld\n", (long) S[0], (long) S[1], (long) count); } } /* End of outer loop */ free(pS); #ifdef _OPENMP #pragma omp barrier #pragma omp for for (i=0; i<n; i++) { omp_destroy_lock(&vLock[i]); } #pragma omp barrier #endif if (tid == 0) { free(S); free(start); free(visited); free(pSCount); #ifdef _OPENMP free(vLock); #endif } #ifdef _OPENMP } #endif elapsed_time = get_seconds() - elapsed_time; mcsim_skip_instrs_end(); return elapsed_time; }
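/*
 * Illustrative sketch, not part of the original file: in each BFS phase,
 * every thread gathers its newly visited vertices into a private buffer pS
 * and publishes the count in pSCount[tid+1]; thread 0 converts pSCount to
 * an inclusive prefix sum, after which thread tid copies its buffer into
 * S[pSCount[tid] .. pSCount[tid+1]-1] with no contention.  The sequential
 * demo below isolates that merge pattern; all names here are assumptions
 * of this sketch.
 */
#include <string.h>

static void demo_merge_frontiers(long *S, long base, long **pS,
                                 const long *counts, int nthreads,
                                 long *pSCount)
{
    pSCount[0] = base;                  /* start[phase_num+1] in the code above */
    for (int k = 1; k <= nthreads; k++) /* inclusive prefix sum of per-thread counts */
        pSCount[k] = pSCount[k - 1] + counts[k - 1];
    for (int tid = 0; tid < nthreads; tid++)   /* done by each thread in parallel above */
        memcpy(&S[pSCount[tid]], pS[tid],
               (size_t)(pSCount[tid + 1] - pSCount[tid]) * sizeof(long));
}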
GB_subref_phase0.c
//------------------------------------------------------------------------------ // GB_subref_phase0: find vectors of C = A(I,J) and determine I,J properties //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB_subref.h" #define GB_Ai(p) GBI_UNFLIP (Ai, p, avlen) //------------------------------------------------------------------------------ // GB_find_Ap_start_end //------------------------------------------------------------------------------ // Find pA and pA_end so that Ai,Ax [pA:pA_end-1] contains the vector // A(imin:imax,kA). If A(:,kA) is dense, [pA:pA_end-1] is the entire dense // vector (it is not trimmed). Otherwise, if A(imin:imax,kA) is empty, then // pA and pA_end are set to -1 to denote an empty list. The resulting pointers // are then returned in Ap_start [kC] and Ap_end [kC]. static inline void GB_find_Ap_start_end ( // input, not modified const int64_t kA, const int64_t *GB_RESTRICT Ap, const int64_t *GB_RESTRICT Ai, const int64_t avlen, const int64_t imin, const int64_t imax, const int64_t kC, const int64_t nzombies, // output: Ap_start [kC] and Ap_end [kC]: int64_t *GB_RESTRICT Ap_start, int64_t *GB_RESTRICT Ap_end ) { //-------------------------------------------------------------------------- // get A(:,kA) //-------------------------------------------------------------------------- int64_t pA = GBP (Ap, kA, avlen) ; int64_t pA_end = GBP (Ap, kA+1, avlen) ; int64_t ajnz = pA_end - pA ; //-------------------------------------------------------------------------- // trim it to A(imin:imax,kA) //-------------------------------------------------------------------------- if (ajnz == avlen) { //---------------------------------------------------------------------- // A (:,kA) is dense; use pA and pA_end as-is //---------------------------------------------------------------------- ; } else if (ajnz == 0 || GB_Ai (pA) > imax || GB_Ai (pA_end-1) < imin) { //---------------------------------------------------------------------- // intersection of A(:,kA) and imin:imax is empty //---------------------------------------------------------------------- pA = -1 ; pA_end = -1 ; } else { //---------------------------------------------------------------------- // A (:,kA) is sparse, with at least one entry //---------------------------------------------------------------------- // trim the leading part of A(:,kA) if (GB_Ai (pA) < imin) { bool found, is_zombie ; int64_t pright = pA_end - 1 ; GB_SPLIT_BINARY_SEARCH_ZOMBIE (imin, Ai, pA, pright, found, nzombies, is_zombie) ; } // trim the trailing part of A (:,kA) if (imin == imax) { if (GB_Ai (pA) == imin) { // found the single entry A (i,kA) pA_end = pA + 1 ; } else { // A (i,kA) has not been found pA = -1 ; pA_end = -1 ; } } else if (imax < GB_Ai (pA_end-1)) { bool found, is_zombie ; int64_t pleft = pA ; int64_t pright = pA_end - 1 ; GB_SPLIT_BINARY_SEARCH_ZOMBIE (imax, Ai, pleft, pright, found, nzombies, is_zombie) ; pA_end = (found) ?
(pleft + 1) : pleft ; } #ifdef GB_DEBUG ajnz = pA_end - pA ; if (ajnz > 0 && Ap != NULL) { // A(imin:imax,kA) is now in Ai [pA:pA_end-1] ASSERT (GB_IMPLIES (Ap [kA] < pA, GB_Ai (pA-1) < imin)) ; ASSERT (GB_IMPLIES (pA_end < Ap [kA+1], imax < GB_Ai (pA_end))) ; ASSERT (imin <= GB_Ai (pA)) ; ASSERT (GB_Ai (pA_end-1) <= imax) ; } #endif } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- // The result [pA:pA_end-1] defines the range of entries that need to be // accessed for constructing C(:,kC). Ap_start [kC] = pA ; Ap_end [kC] = pA_end ; } //------------------------------------------------------------------------------ // GB_subref_phase0 //------------------------------------------------------------------------------ #define GB_FREE_WORK \ GB_FREE (Count) ; GrB_Info GB_subref_phase0 ( // output int64_t *GB_RESTRICT *p_Ch, // Ch = C->h hyperlist, or NULL standard int64_t *GB_RESTRICT *p_Ap_start, // A(:,kA) starts at Ap_start [kC] int64_t *GB_RESTRICT *p_Ap_end, // ... and ends at Ap_end [kC] - 1 int64_t *p_Cnvec, // # of vectors in C bool *p_need_qsort, // true if C must be sorted int *p_Ikind, // kind of I int64_t *p_nI, // length of I int64_t Icolon [3], // for GB_RANGE, GB_STRIDE int64_t *p_nJ, // length of J // input, not modified const GrB_Matrix A, const GrB_Index *I, // index list for C = A(I,J), or GrB_ALL, etc. const int64_t ni, // length of I, or special const GrB_Index *J, // index list for C = A(I,J), or GrB_ALL, etc. const int64_t nj, // length of J, or special // const bool must_sort, // true if C must be returned sorted GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (A, "A for subref phase 0", GB0) ; ASSERT (!GB_IS_BITMAP (A)) ; // GB_bitmap_subref is used instead ASSERT (p_Ch != NULL) ; ASSERT (p_Ap_start != NULL) ; ASSERT (p_Ap_end != NULL) ; ASSERT (p_Cnvec != NULL) ; ASSERT (p_nJ != NULL) ; ASSERT (p_Ikind != NULL) ; ASSERT (p_nI != NULL) ; ASSERT (Icolon != NULL) ; ASSERT (I != NULL) ; ASSERT (J != NULL) ; GrB_Info info ; (*p_Ch ) = NULL ; (*p_Ap_start ) = NULL ; (*p_Ap_end ) = NULL ; (*p_Cnvec ) = 0 ; (*p_need_qsort) = false ; (*p_Ikind ) = 0 ; (*p_nI ) = 0 ; (*p_nJ ) = 0 ; //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- int64_t *GB_RESTRICT Ap = A->p ; // Ap (but not A->p) may be trimmed int64_t *GB_RESTRICT Ah = A->h ; // Ah (but not A->h) may be trimmed int64_t *GB_RESTRICT Ai = A->i ; int64_t anvec = A->nvec ; // may be trimmed int64_t avlen = A->vlen ; int64_t avdim = A->vdim ; int64_t nzombies = A->nzombies ; //-------------------------------------------------------------------------- // check the properties of I and J //-------------------------------------------------------------------------- // C = A(I,J) so I is in range 0:avlen-1 and J is in range 0:avdim-1 int64_t nI, nJ, Jcolon [3] ; int Ikind, Jkind ; GB_ijlength (I, ni, avlen, &nI, &Ikind, Icolon) ; GB_ijlength (J, nj, avdim, &nJ, &Jkind, Jcolon) ; bool I_unsorted, I_has_dupl, I_contig, J_unsorted, J_has_dupl, J_contig ; int64_t imin, imax, jmin, jmax ; info = GB_ijproperties (I, ni, nI, avlen, &Ikind, Icolon, &I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context) ; if (info != GrB_SUCCESS) { // I invalid 
return (info) ; } info = GB_ijproperties (J, nj, nJ, avdim, &Jkind, Jcolon, &J_unsorted, &J_has_dupl, &J_contig, &jmin, &jmax, Context) ; if (info != GrB_SUCCESS) { // J invalid return (info) ; } bool need_qsort = I_unsorted ; //-------------------------------------------------------------------------- // determine if C is empty //-------------------------------------------------------------------------- bool C_empty = (nI == 0 || nJ == 0) ; //-------------------------------------------------------------------------- // trim the hyperlist of A //-------------------------------------------------------------------------- // Ah, Ap, and anvec are modified to include just the vectors in range // jmin:jmax, inclusive. A itself is not modified, just the Ah and Ap // pointers, and the scalar anvec. If J is ":", then jmin is zero and // jmax is avdim-1, so there is nothing to trim from Ah. If C is empty, // then Ah and Ap will not be accessed at all, so this can be skipped. bool A_is_hyper = (Ah != NULL) ; if (A_is_hyper && !C_empty) { //---------------------------------------------------------------------- // trim the leading end of Ah so that it starts with jmin:... //---------------------------------------------------------------------- if (jmin > 0) { bool found ; int64_t kleft = 0 ; int64_t kright = anvec-1 ; GB_SPLIT_BINARY_SEARCH (jmin, Ah, kleft, kright, found) ; Ah += kleft ; Ap += kleft ; anvec -= kleft ; } //---------------------------------------------------------------------- // trim the trailing end of Ah so that it ends with ..:jmax //---------------------------------------------------------------------- if (jmax < avdim-1) { bool found ; int64_t kleft = 0 ; int64_t kright = anvec-1 ; GB_SPLIT_BINARY_SEARCH (jmax, Ah, kleft, kright, found) ; anvec = (found) ? (kleft + 1) : kleft ; } // Ah has been trimmed ASSERT (GB_IMPLIES (anvec > 0, jmin <= Ah [0] && Ah [anvec-1] <= jmax)); } // Ah may now be empty, after being trimmed C_empty = C_empty || (anvec == 0) ; //-------------------------------------------------------------------------- // determine # of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = 1, ntasks = 1 ; int max_ntasks = nthreads_max * 8 ; int64_t *GB_RESTRICT Count = NULL ; // size max_ntasks+1 #define GB_GET_NTHREADS_AND_NTASKS(work) \ { \ nthreads = GB_nthreads (work, chunk, nthreads_max) ; \ ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ; \ ntasks = GB_IMIN (ntasks, work) ; \ ntasks = GB_IMAX (ntasks, 1) ; \ } //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- Count = GB_CALLOC (max_ntasks+1, int64_t) ; if (Count == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // compute Cnvec and determine the format of Ch //-------------------------------------------------------------------------- // Ch is an explicit or implicit array of size Cnvec <= nJ. jC = Ch [kC] // if C(:,jC) is the (kC)th vector of C. If NULL, then C is standard, and // jC == kC. jC is in the range 0 to nJ-1. 
int64_t *GB_RESTRICT Ch = NULL ; int64_t *GB_RESTRICT Ap_start = NULL ; int64_t *GB_RESTRICT Ap_end = NULL ; int64_t Cnvec = 0 ; int64_t jbegin = Jcolon [GxB_BEGIN] ; int64_t jinc = Jcolon [GxB_INC ] ; if (C_empty) { //---------------------------------------------------------------------- // C is an empty hypersparse matrix //---------------------------------------------------------------------- ; } else if (!A_is_hyper) { //---------------------------------------------------------------------- // both C and A are standard matrices //---------------------------------------------------------------------- Cnvec = nJ ; GB_GET_NTHREADS_AND_NTASKS (nJ) ; } else if (Jkind == GB_ALL || Jkind == GB_RANGE) { //---------------------------------------------------------------------- // J is ":" or jbegin:jend //---------------------------------------------------------------------- // Ch is a shifted copy of the trimmed Ah, of length Cnvec = anvec. // so kA = kC, and jC = Ch [kC] = jA - jmin. Ap has also been trimmed. Cnvec = anvec ; ASSERT (Cnvec <= nJ) ; GB_GET_NTHREADS_AND_NTASKS (anvec) ; } else if (Jkind == GB_STRIDE && anvec < nJ * 64) { //---------------------------------------------------------------------- // J is jbegin:jinc:jend, but J is large //---------------------------------------------------------------------- // The case for Jkind == GB_STRIDE can be done by either this method, // or the one below. This takes O(anvec) time, and the one below // takes O(nj*log2(anvec)), so use this method if anvec < nj * 64. // Ch is a list of length Cnvec, where Cnvec is the length of // the intersection of Ah and jbegin:jinc:jend. // count the length of Ch Cnvec = 0 ; GB_GET_NTHREADS_AND_NTASKS (anvec) ; // scan all of Ah and check each entry if it appears in J int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t kA_start, kA_end, my_Cnvec = 0 ; GB_PARTITION (kA_start, kA_end, anvec, (jinc > 0) ? 
tid : (ntasks-tid-1), ntasks) ; for (int64_t kA = kA_start ; kA < kA_end ; kA++) { int64_t jA = Ah [kA] ; if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon)) { my_Cnvec++ ; } } Count [tid] = my_Cnvec ; } GB_cumsum (Count, ntasks, NULL, 1) ; Cnvec = Count [ntasks] ; } else // Jkind == GB_LIST or GB_STRIDE { //---------------------------------------------------------------------- // J is an explicit list, or jbegin:jinc:jend //---------------------------------------------------------------------- // Ch is an explicit list: the intersection of Ah and J // count the length of Ch Cnvec = 0 ; GB_GET_NTHREADS_AND_NTASKS (nJ) ; // scan all of J and check each entry if it appears in Ah int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t jC_start, jC_end, my_Cnvec = 0 ; GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ; for (int64_t jC = jC_start ; jC < jC_end ; jC++) { int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ; bool found ; int64_t kA = 0 ; int64_t kright = anvec-1 ; GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ; if (found) my_Cnvec++ ; } Count [tid] = my_Cnvec ; } GB_cumsum (Count, ntasks, NULL, 1) ; Cnvec = Count [ntasks] ; } //-------------------------------------------------------------------------- // allocate Ch, Ap_start, and Ap_end //-------------------------------------------------------------------------- C_empty = C_empty || (Cnvec == 0) ; // C is hypersparse if A is hypersparse, or if C is empty bool C_is_hyper = A_is_hyper || C_empty ; if (C_is_hyper) { Ch = GB_MALLOC (Cnvec, int64_t) ; if (Ch == NULL) { GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } } if (Cnvec > 0) { Ap_start = GB_MALLOC (Cnvec, int64_t) ; Ap_end = GB_MALLOC (Cnvec, int64_t) ; if (Ap_start == NULL || Ap_end == NULL) { // out of memory GB_FREE_WORK ; GB_FREE (Ch) ; GB_FREE (Ap_start) ; GB_FREE (Ap_end) ; return (GrB_OUT_OF_MEMORY) ; } } //-------------------------------------------------------------------------- // create Ch, Ap_start, and Ap_end //-------------------------------------------------------------------------- // For the (kC)th vector of C, which corresponds to the (kA)th vector of A, // pA = Ap_start [kC] and pA_end = Ap_end [kC] are pointers to the range // of entries in A(imin:imax,kA). if (C_empty) { //---------------------------------------------------------------------- // C is an empty hypersparse matrix //---------------------------------------------------------------------- ; } else if (!A_is_hyper) { //---------------------------------------------------------------------- // both C and A are standard matrices //---------------------------------------------------------------------- int64_t jC ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (jC = 0 ; jC < nJ ; jC++) { int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ; GB_find_Ap_start_end (jA, Ap, Ai, avlen, imin, imax, jC, nzombies, Ap_start, Ap_end) ; } } else if (Jkind == GB_ALL || Jkind == GB_RANGE) { //---------------------------------------------------------------------- // J is ":" or jbegin:jend //---------------------------------------------------------------------- // C and A are both hypersparse. Ch is a shifted copy of the trimmed // Ah, of length Cnvec = anvec. so kA = kC. Ap has also been trimmed.
int64_t kC ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (kC = 0 ; kC < Cnvec ; kC++) { int64_t kA = kC ; int64_t jA = Ah [kA] ; int64_t jC = jA - jmin ; Ch [kC] = jC ; GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax, kC, nzombies, Ap_start, Ap_end) ; } } else if (Jkind == GB_STRIDE && anvec < nJ * 64) { //---------------------------------------------------------------------- // J is jbegin:jinc:jend where jinc may be positive or negative //---------------------------------------------------------------------- // C and A are both hypersparse. Ch is constructed by scanning all // vectors in Ah [0..anvec-1] and checking if they appear in the // jbegin:jinc:jend sequence. if (jinc > 0) { int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t kA_start, kA_end ; GB_PARTITION (kA_start, kA_end, anvec, tid, ntasks) ; int64_t kC = Count [tid] ; for (int64_t kA = kA_start ; kA < kA_end ; kA++) { int64_t jA = Ah [kA] ; if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon)) { int64_t jC = (jA - jbegin) / jinc ; Ch [kC] = jC ; GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax, kC, nzombies, Ap_start, Ap_end) ; kC++ ; } } } } else { int tid; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t kA_start, kA_end ; GB_PARTITION (kA_start, kA_end, anvec, ntasks-tid-1, ntasks) ; int64_t kC = Count [tid] ; for (int64_t kA = kA_end-1 ; kA >= kA_start ; kA--) { int64_t jA = Ah [kA] ; if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon)) { int64_t jC = (jA - jbegin) / jinc ; Ch [kC] = jC ; GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax, kC, nzombies, Ap_start, Ap_end) ; kC++ ; } } } } } else // Jkind == GB_LIST or GB_STRIDE { //---------------------------------------------------------------------- // J is an explicit list, or jbegin:jinc:jend //---------------------------------------------------------------------- // C and A are both hypersparse. Ch is constructed by scanning the // list J, or the entire jbegin:jinc:jend sequence. Each vector is // then found in Ah, via binary search. int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t jC_start, jC_end ; GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ; int64_t kC = Count [tid] ; for (int64_t jC = jC_start ; jC < jC_end ; jC++) { int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ; bool found ; int64_t kA = 0 ; int64_t kright = anvec-1 ; GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ; if (found) { ASSERT (jA == Ah [kA]) ; Ch [kC] = jC ; GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax, kC, nzombies, Ap_start, Ap_end) ; kC++ ; } } } } //-------------------------------------------------------------------------- // check result //-------------------------------------------------------------------------- #ifdef GB_DEBUG for (int64_t kC = 0 ; kC < Cnvec ; kC++) { // jC is the (kC)th vector of C = A(I,J) int64_t jC = GBH (Ch, kC) ; int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ; // jA is the corresponding (kA)th vector of A. int64_t kA = 0 ; int64_t pright = A->nvec - 1 ; int64_t pA_start_all, pA_end_all ; bool found = GB_lookup (A->h != NULL, A->h, A->p, A->vlen, &kA, pright, jA, &pA_start_all, &pA_end_all) ; if (found && A->h != NULL) { ASSERT (jA == A->h [kA]) ; } int64_t pA = Ap_start [kC] ; int64_t pA_end = Ap_end [kC] ; int64_t ajnz = pA_end - pA ; if (ajnz == avlen) { // A(:,kA) is dense; Ai [pA:pA_end-1] is the entire vector. 
// C(:,kC) will have exactly nI entries. ASSERT (pA == pA_start_all) ; ASSERT (pA_end == pA_end_all ) ; ; } else if (ajnz > 0) { // A(imin:imax,kA) has at least one entry, in Ai [pA:pA_end-1] ASSERT (imin <= GB_Ai (pA)) ; ASSERT (GB_Ai (pA_end-1) <= imax) ; ASSERT (pA_start_all <= pA && pA < pA_end && pA_end <= pA_end_all) ; } else { // A(imin:imax,kA) and C(:,kC) are empty ; } } #endif //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORK ; (*p_Ch ) = Ch ; (*p_Ap_start ) = Ap_start ; (*p_Ap_end ) = Ap_end ; (*p_Cnvec ) = Cnvec ; (*p_need_qsort) = need_qsort ; (*p_Ikind ) = Ikind ; (*p_nI ) = nI ; (*p_nJ ) = nJ ; return (GrB_SUCCESS) ; }
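/*
 * Illustrative sketch, not part of the original file: GB_find_Ap_start_end
 * narrows the sorted index list Ai [pA .. pA_end-1] to the subrange whose
 * values lie in imin..imax, using binary search at both ends.  The
 * zombie-free equivalent below uses two lower-bound searches and the same
 * -1/-1 sentinel for an empty result; demo_lower_bound and demo_trim_range
 * are names assumed for this sketch, not GraphBLAS API.
 */
#include <stdint.h>

/* smallest p in [lo,hi) with Ai [p] >= target, or hi if none */
static int64_t demo_lower_bound (const int64_t *Ai, int64_t lo, int64_t hi,
    int64_t target)
{
    while (lo < hi)
    {
        int64_t mid = lo + (hi - lo) / 2 ;
        if (Ai [mid] < target) { lo = mid + 1 ; } else { hi = mid ; }
    }
    return (lo) ;
}

/* trim [*pA,*pA_end) to the entries with imin <= Ai [p] <= imax */
static void demo_trim_range (const int64_t *Ai, int64_t *pA, int64_t *pA_end,
    int64_t imin, int64_t imax)
{
    int64_t lo = demo_lower_bound (Ai, *pA, *pA_end, imin) ;
    int64_t hi = demo_lower_bound (Ai, lo, *pA_end, imax + 1) ;
    if (lo == hi) { *pA = -1 ; *pA_end = -1 ; }   /* empty intersection */
    else { *pA = lo ; *pA_end = hi ; }
}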
mask-unpack.c
/* { dg-do compile } */ /* { dg-options "-mavx512bw -mavx512dq -mno-stackrealign -O3 -fopenmp-simd -fdump-tree-vect-details" } */ /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 10 "vect" } } */ /* { dg-final { scan-assembler-not "maskmov" } } */ #define LENGTH 1000 long l1[LENGTH], l2[LENGTH]; int i1[LENGTH], i2[LENGTH]; short s1[LENGTH], s2[LENGTH]; char c1[LENGTH], c2[LENGTH]; double d1[LENGTH], d2[LENGTH]; int test1 () { int i; #pragma omp simd safelen(16) for (i = 0; i < LENGTH; i++) if (i1[i] > i2[i]) l1[i] = 1; } int test2 (int n) { int i; #pragma omp simd safelen(32) for (i = 0; i < LENGTH; i++) if (s1[i] > s2[i]) i1[i] = 1; } int test3 (int n) { int i; #pragma omp simd safelen(32) for (i = 0; i < LENGTH; i++) if (c1[i] > c2[i]) s1[i] = 1; } int test4 (int n) { int i; #pragma omp simd safelen(32) for (i = 0; i < LENGTH; i++) if (c1[i] > c2[i]) d1[i] = 1; } int test5 (int n) { int i; #pragma omp simd safelen(32) for (i = 0; i < LENGTH; i++) l1[i] = i1[i] > i2[i] ? 1 : 2; } int test6 (int n) { int i; #pragma omp simd safelen(32) for (i = 0; i < LENGTH; i++) i1[i] = s1[i] > s2[i] ? 1 : 2; } int test7 (int n) { int i; #pragma omp simd safelen(32) for (i = 0; i < LENGTH; i++) s1[i] = c1[i] > c2[i] ? 1 : 2; } int test8 (int n) { int i; #pragma omp simd safelen(32) for (i = 0; i < LENGTH; i++) d1[i] = c1[i] > c2[i] ? 1 : 2; } int test9 (int n) { int i; #pragma omp simd safelen(16) for (i = 0; i < LENGTH; i++) if (c1[i] > c2[i] && i1[i] < i2[i]) l1[i] = 1; } int test10 (int n) { int i; #pragma omp simd safelen(16) for (i = 0; i < LENGTH; i++) if (c1[i] > c2[i] && i1[i] < i2[i]) l1[i] = 1; else l1[i] = 2; }
resample.h
#ifndef RESAMPLE_H_ #define RESAMPLE_H_ #include <omp.h> #include <torch/extension.h> #include "nn/common/resample.h" namespace mapped_conv { namespace nn { namespace cpu { template <typename T> void ResampleToMap2D(const int64_t num_kernels, torch::Tensor data_in, torch::Tensor sample_map, const int64_t channels, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int64_t interpolation, torch::Tensor data_out) { const T *data_in_ptr = data_in.data<T>(); const T *sample_map_ptr = sample_map.data<T>(); T *data_out_ptr = data_out.data<T>(); int64_t index; #pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \ data_out_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::ResampleToMap2D(index, data_in_ptr, sample_map_ptr, channels, in_height, in_width, out_height, out_width, interpolation, data_out_ptr); } } template <typename T> void ResampleFromMap2D(const int64_t num_kernels, torch::Tensor data_out, torch::Tensor sample_map, const int64_t channels, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int64_t interpolation, torch::Tensor data_in) { const T *data_out_ptr = data_out.data<T>(); const T *sample_map_ptr = sample_map.data<T>(); T *data_in_ptr = data_in.data<T>(); int64_t index; #pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \ data_out_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::ResampleFromMap2D(index, data_out_ptr, sample_map_ptr, channels, in_height, in_width, out_height, out_width, interpolation, data_in_ptr); } } // -------------------------------------------- // -------------------------------------------- template <typename T> void ResampleToMap2DWeighted( const int64_t num_kernels, torch::Tensor data_in, torch::Tensor sample_map, torch::Tensor interp_weights, const int64_t channels, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int64_t interpolation, const int64_t num_interp_pts, torch::Tensor data_out) { const T *data_in_ptr = data_in.data<T>(); const T *sample_map_ptr = sample_map.data<T>(); const T *interp_weights_ptr = interp_weights.data<T>(); T *data_out_ptr = data_out.data<T>(); int64_t index; #pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \ interp_weights_ptr, \ data_out_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::ResampleToMap2DWeighted( index, data_in_ptr, sample_map_ptr, interp_weights_ptr, channels, in_height, in_width, out_height, out_width, interpolation, num_interp_pts, data_out_ptr); } } template <typename T> void ResampleFromMap2DWeighted( const int64_t num_kernels, torch::Tensor data_out, torch::Tensor sample_map, torch::Tensor interp_weights, const int64_t channels, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int64_t interpolation, const int64_t num_interp_pts, torch::Tensor data_in) { const T *data_out_ptr = data_out.data<T>(); const T *sample_map_ptr = sample_map.data<T>(); const T *interp_weights_ptr = interp_weights.data<T>(); T *data_in_ptr = data_in.data<T>(); int64_t index; #pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \ interp_weights_ptr, \ data_out_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::ResampleFromMap2DWeighted( index, data_out_ptr, sample_map_ptr, interp_weights_ptr, channels, 
in_height, in_width, out_height, out_width, interpolation, num_interp_pts, data_in_ptr); } } // -------------------------------------------- // -------------------------------------------- template <typename T> void ResampleToMap2DVoting(const int64_t num_kernels, torch::Tensor data_in, torch::Tensor sample_map, const int64_t channels, const int64_t in_height, const int64_t in_width, const int64_t out_height, const int64_t out_width, const int64_t numCandidates, torch::Tensor data_out) { const int64_t *data_in_ptr = data_in.data<int64_t>(); const int64_t *sample_map_ptr = sample_map.data<int64_t>(); int64_t *data_out_ptr = data_out.data<int64_t>(); int64_t index; #pragma omp parallel for shared(data_in_ptr, sample_map_ptr, \ data_out_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::ResampleToMap2DVoting(index, data_in_ptr, sample_map_ptr, channels, in_height, in_width, out_height, out_width, numCandidates, data_out_ptr); } } } // namespace cpu } // namespace nn } // namespace mapped_conv #endif
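/*
 * Illustrative sketch, not part of the original header: every wrapper above
 * follows the same dispatch pattern, flattening the output into a single
 * index space and giving each OpenMP iteration one element to resample.
 * The torch-free skeleton below shows just that pattern; demo_kernel and
 * demo_dispatch are names assumed for this sketch.
 */
#include <stdint.h>

static void demo_kernel(int64_t index, const float *in, float *out)
{
    out[index] = in[index];   /* stand-in for common::ResampleToMap2D */
}

static void demo_dispatch(const float *in, float *out, int64_t num_kernels)
{
    int64_t index;
    #pragma omp parallel for private(index) schedule(static)
    for (index = 0; index < num_kernels; index++)
    {
        demo_kernel(index, in, out);
    }
}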
rnn_impl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file rnn_impl.h * \brief * \author Shu Zhang */ #ifndef MXNET_OPERATOR_RNN_IMPL_H_ #define MXNET_OPERATOR_RNN_IMPL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <algorithm> #include <random> #include <map> #include <vector> #include <string> #include <utility> #include "./math.h" #include "./math_functions-inl.h" #include "./operator_common.h" #include "./mshadow_op.h" #include "./linalg.h" namespace mxnet { namespace op { template <typename DType> inline DType sigmoid(DType x) { return 1.0f / (1.0f + exp(-x)); } template <typename DType> inline DType relu(DType x) { return x > 0.0f ? static_cast<float>(x) : 0.0f; } template <typename DType> void LstmForwardTrainingSingleLayer(DType* ws, DType* rs, bool state_outputs, bool bid, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, const Tensor<cpu, 2, DType>& cx, const Tensor<cpu, 3, DType>& y, DType* w_ptr, DType* b_ptr, DType* hy_ptr, DType* cy_ptr) { using namespace mshadow; const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I)); const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H)); const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H)); const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H)); const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H)); const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H)); const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H)); const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H)); Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H)); DType* c_ptr = bid ? rs + T * N * H * 7 : rs; Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H)); Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4)); const int offset = bid ? H : 0; const DType alpha = 1.0; const DType beta = 0.0; const index_t cell_size = N * H; linalg_gemm(x, wx, yx_flat, alpha, beta, false, true); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (index_t i = 0; i < T; ++i) { index_t t = bid ? T - 1 - i : i; linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true); #pragma omp parallel for num_threads(omp_threads) for (index_t jk = 0; jk < cell_size; ++jk) { index_t j = jk / H; index_t k = jk % H; DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]); DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]); DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]); DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]); DType ct = (i ? 
c[i - 1][j][k] : cx[j][k]) * ft + it * gt; DType ht = ot * tanh(ct); h[j][k] = ht; // reserve y[t][j][k + offset] = ht; c[i][j][k] = ct; ifgo[i][j][k][0] = it; ifgo[i][j][k][1] = ft; ifgo[i][j][k][2] = gt; ifgo[i][j][k][3] = ot; if (i == T - 1 && state_outputs) { hy_ptr[jk] = ht; cy_ptr[jk] = ct; } } } } template <typename DType> void LstmForwardTraining(DType* ws, DType* rs, bool state_outputs, const int L, const int D, const index_t T, const index_t N, const index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* cx_ptr, DType* w_ptr, DType* b_ptr, DType* y_ptr, DType* hy_ptr, DType* cy_ptr, const float dropout, std::mt19937& rnd_engine) { // NOLINT(runtime/references) DType* dropout_random = rs; DType* rs2 = dropout_random + (L - 1) * D * T * N * H; const int total_layers = D * L; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H)); Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H)); const index_t b_size = 2 * H * 4; const index_t r_size = D * T * N * H * 6; const index_t y_offset = T * N * H * 5; const index_t cell_size = N * H; int idx = 0; // state & cell state's idx; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (int i = 0; i < L; ++i) { const index_t input_size = i ? H * D : I; const index_t w_size = (input_size + H) * H * 4; Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size)); Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D)); LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H, x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr); if (D == 2) { w_ptr += w_size; b_ptr += b_size; ++idx; if (state_outputs) { hy_ptr += cell_size; cy_ptr += cell_size; } LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H, x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr); } if (i != L - 1) { w_ptr += w_size; b_ptr += b_size; if (dropout > 0.0f) { std::uniform_real_distribution<float> distribution(0, 1); for (index_t j = 0; j < T * N * H * D; j++) { if (distribution(rnd_engine) < dropout) { dropout_random[i * T * N * H * D + j] = 0; y.dptr_[j] = 0; } else { dropout_random[i * T * N * H * D + j] = 1.0f - dropout; y.dptr_[j] = y.dptr_[j] / (1.0f - dropout); } } } x_ptr = y.dptr_; rs2 += r_size; ++idx; if (state_outputs) { hy_ptr += cell_size; cy_ptr += cell_size; } } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * H * D; ++i) { y_ptr[i] = (rs2 + y_offset)[i]; } } template <typename DType> void LstmForwardInferenceSingleLayer(DType* ws, bool state_outputs, bool bid, const index_t T, const index_t N, const index_t I, const int H, const int P, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, const Tensor<cpu, 2, DType>& cx, const Tensor<cpu, 3, DType>& y, DType* w_ptr, DType* b_ptr, DType* hy_ptr, DType* cy_ptr) { using namespace mshadow; const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I)); const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, (P ? 
P : H))); Tensor<cpu, 2, DType> whr(w_ptr, Shape2(1, 1)); if (P > 0) whr = Tensor<cpu, 2, DType>(wh.dptr_ + P * 4 * H, Shape2(P, H)); const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H)); const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H)); Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4)); Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4)); const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H)); const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H)); Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H)); Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H)); Tensor<cpu, 2, DType> r(hy_ptr, Shape2(1, 1)); if (P > 0) r = Tensor<cpu, 2, DType>(hy_ptr, Shape2(N, P)); const int offset = bid ? H : 0; const int proj_offset = bid ? P : 0; const DType alpha = 1.0; const DType beta = 0.0; const index_t cell_size = N * H; linalg_gemm(x, wx, yx_flat, alpha, beta, false, true); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (index_t i = 0; i < T; ++i) { index_t t = bid ? T - 1 - i : i; if (P > 0) { linalg_gemm(i ? r : hx, wh, yh_flat, alpha, beta, false, true); } else { linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true); } #pragma omp parallel for num_threads(omp_threads) for (index_t jk = 0; jk < cell_size; ++jk) { int j = jk / H; int k = jk % H; DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]); DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]); DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]); DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]); DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt; DType ht = ot * tanh(ct); if (P == 0) y[t][j][k + offset] = ht; if (i == T - 1 && state_outputs) { if (P == 0) hy_ptr[jk] = ht; cy_ptr[jk] = ct; } else { c[j][k] = ct; } h[j][k] = ht; } if (P > 0) { linalg_gemm(h, whr, r, alpha, beta, false, true); #pragma GCC diagnostic push #if __GNUC__ >= 8 #pragma GCC diagnostic ignored "-Wclass-memaccess" #endif #pragma omp parallel for num_threads(omp_threads) for (int j = 0; j < N; ++j) { std::memcpy(y[t][j].dptr_ + proj_offset, r[j].dptr_, P * sizeof(DType)); } #pragma GCC diagnostic pop } } } template <typename DType> void LstmForwardInference(DType* ws, bool state_outputs, const int L, const int D, const index_t T, const index_t N, const index_t I, const int H, const int P, DType* x_ptr, DType* hx_ptr, DType* cx_ptr, DType* w_ptr, DType* b_ptr, DType* y_ptr, DType* hy_ptr, DType* cy_ptr) { const int total_layers = D * L; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, P ? P : H)); Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H)); const index_t b_size = 2 * H * 4; const index_t cell_size = N * H; const index_t projection_size = (P ? P : H) * N; DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2; DType* y_cur_ptr = y_ptr; int idx = 0; // state & cell state's idx; bool flag = L % 2 ? false : true; for (int i = 0; i < L; ++i) { const index_t input_size = i ? (P ? P : H) * D : I; index_t w_size = (input_size + (P ? P : H)) * H * 4; if (P > 0) { w_size += P * H; } // If bidirectional, need space to save current layer output y. if (D == 2) { y_cur_ptr = flag ? y_tmp_ptr : y_ptr; flag = !flag; } Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size)); Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, (P ? 
P : H) * D)); LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H, P, x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr); // If bidirectional, then calculate the reverse direction's forward result. if (D == 2) { w_ptr += w_size; b_ptr += b_size; ++idx; if (state_outputs) { hy_ptr += projection_size; cy_ptr += cell_size; } LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H, P, x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr); } // Don't need to move pointer in the last layer. if (i != L - 1) { w_ptr += w_size; b_ptr += b_size; x_ptr = y_cur_ptr; ++idx; if (state_outputs) { hy_ptr += projection_size; cy_ptr += cell_size; } } } } template <typename DType> void LstmBackwardSingleLayer(DType* ws, DType* rs, DType* tmp_buf, bool bid, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, const Tensor<cpu, 2, DType>& cx, const Tensor<cpu, 3, DType>& y, const Tensor<cpu, 3, DType>& dy, const Tensor<cpu, 2, DType>& dx, const Tensor<cpu, 2, DType>& dhx, const Tensor<cpu, 2, DType>& dcx, DType* dhy_ptr, DType* dcy_ptr, DType* w_ptr, DType* dw_ptr, DType* db_ptr, int req_data, int req_params, int req_state, int req_statecell) { using namespace mshadow; const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I)); const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H)); Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I)); Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H)); Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4)); Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4)); DType* c_ptr = bid ? rs + T * N * H * 7 : rs; const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H)); const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (req_params != kNullOp && req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H * 4 * H; ++i) { dwh.dptr_[i] = 0; } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 4 * H; ++i) { dbx.dptr_[i] = 0; dbh.dptr_[i] = 0; } } Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H)); Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H)); Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H)); Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H)); const int offset = bid ? H : 0; const DType alpha = 1.0; const DType beta0 = 0.0; const DType beta1 = 1.0; const DType beta2 = 2.0; const index_t cell_size = N * H; if (dhy_ptr != nullptr) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < cell_size; ++i) { dh.dptr_[i] = dhy_ptr[i]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < cell_size; ++i) { dh.dptr_[i] = 0; } } if (dcy_ptr != nullptr) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < cell_size; ++i) { dc.dptr_[i] = dcy_ptr[i]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < cell_size; ++i) { dc.dptr_[i] = 0; } } for (index_t i = T - 1; i >= 0; --i) { index_t t = bid ? T - 1 - i : i; index_t tnext = bid ? t + 1 : t - 1; const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx; const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx; const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx; const Tensor<cpu, 2, DType>& cnext = i ? 
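    /* Backward sweep bookkeeping: `i` indexes the saved per-direction state
       buffers `c`/`ifgo`, while `t` maps onto the layout of y/dy (reversed
       when `bid`). The `*next` aliases select the previous step's buffers,
       switching to the initial-state tensors once i reaches 0: */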
c[i - 1] : cx; #pragma omp parallel for num_threads(omp_threads) for (index_t jk = 0; jk < cell_size; ++jk) { index_t j = jk / H; index_t k = jk % H; DType tc = tanh(c[i][j][k]); DType it = ifgo[i][j][k][0]; DType ft = ifgo[i][j][k][1]; DType gt = ifgo[i][j][k][2]; DType ot = ifgo[i][j][k][3]; dh[j][k] += dy[t][j][k + offset]; dc[j][k] += dh[j][k] * ot * (1 - tc * tc); difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it); difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft); difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt); difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot); if (req_statecell != kNullOp || i > 0) { dcnext[j][k] = dc[j][k] * ft; } if (i) { htmp[j][k] = y[tnext][j][k + offset]; } } Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4)); if (req_state != kNullOp || i > 0) { linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false); } if (req_params != kNullOp) { if (req_params != kAddTo) { linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false); } else { linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false); // generate dwx every time step for AddTo Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4)); linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false); } } } Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4)); if (req_data != kNullOp) { linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false); } if (req_params != kNullOp && req_params != kAddTo) { linalg_gemm(dyx, x, dwx, alpha, beta0, true, false); } const index_t row = T * N; const index_t col = H * 4; if (req_params != kNullOp) { if (req_params != kAddTo) { for (index_t i = 0; i < row; ++i) { #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < col; ++j) { dbx[j] += dyx[i][j]; dbh[j] = dbx[j]; } } } else { const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T)); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < col * T; ++i) { tmp_dbx.dptr_[i] = 0; tmp_dbh.dptr_[i] = 0; } for (index_t t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < col; ++j) { for (index_t i = 0; i < N; ++i) { tmp_dbx[j][t] += dyx[t * N + i][j]; tmp_dbh[j][t] = tmp_dbx[j][t]; } } #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < col; ++j) { dbx[j] += tmp_dbx[j][t] + dbx[j]; dbh[j] += tmp_dbh[j][t] + dbh[j]; } } } } } template <typename DType> void LstmBackward(DType* ws, DType* rs, const int L, const int D, const index_t T, const index_t N, const index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* cx_ptr, DType* w_ptr, DType* y_ptr, DType* dy_ptr, DType* dhy_ptr, DType* dcy_ptr, DType* dx_ptr, DType* dhx_ptr, DType* dcx_ptr, DType* dw_ptr, DType* db_ptr, int req_data, int req_params, int req_state, int req_statecell, const float dropout) { DType* dropout_random = rs + (L - 1) * D * T * N * H; DType* rs2 = rs + (L - 1) * D * T * N * H; DType* tmp_buf = ws; DType* ws2 = tmp_buf + 8 * T * H; const int total_layers = D * L; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H)); Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H)); Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H)); Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H)); const index_t b_size = 2 * H * 4; const index_t r_size = D * T * N * H * 6; const index_t y_offset = T * N * H * 5; const index_t w_size1 = (I + H) * H * 4; // first layer 
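  // Worked example (illustrative sizes, not from the source): with I = 10,
  // H = 20 the first layer stores 4 gate blocks of input weights plus 4 of
  // recurrent weights per direction, w_size1 = (10 + 20) * 20 * 4 = 2400,
  // while deeper layers consume the D*H-wide concatenated output of the
  // previous layer, e.g. for D = 2: w_size2 = (2*20 + 20) * 20 * 4 = 4800.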
const index_t w_size2 = (D * H + H) * H * 4; // other layers const index_t cell_size = N * H; const index_t y_size = T * N * H * D; DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3; for (int i = L - 1; i >= 0; --i) { const index_t input_size = i ? H * D : I; const index_t w_size = i ? w_size2 : w_size1; int idx = i * D; DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr; DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr; DType* db_cur_ptr = db_ptr + i * b_size * D; DType* rs_cur_ptr = rs2 + i * r_size; DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : nullptr; DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : nullptr; Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D)); Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D)); Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size)); Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size)); LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H, x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx], dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr, req_data, req_params, req_state, req_statecell); if (D == 2) { w_cur_ptr += w_size; dw_cur_ptr += w_size; db_cur_ptr += b_size; ++idx; dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : nullptr; dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : nullptr; LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H, x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx], dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr, req_data, req_params, req_state, req_statecell); // Prevent overwritting dy while calculating dx in left2right layer const int loop_iteration = (L - 1) - i; dy_tmp_ptr = loop_iteration % 2 ? dy_tmp_ptr - y_size : dy_tmp_ptr + y_size; } if (dropout > 0.0f && i > 0 && req_data != kNullOp) { dropout_random = dropout_random - T * N * D * H; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < T * N * D * H; j++) { if (dropout_random[j] == 0) { dx.dptr_[j] = 0; } else { dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout); } } } dy_ptr = dx.dptr_; } } template <typename DType> void GruForwardInferenceSingleLayer(DType* ws, DType* tmp_buf, bool state_outputs, const int D, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, DType* wx_ptr, DType* wh_ptr, DType* bx_ptr, DType* bh_ptr, DType* y_ptr, DType* hy_ptr) { DType* ht = y_ptr; DType* ht_1 = y_ptr; DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H; DType* back_ht = back_ht_1; DType* gemmC1 = ws; // [D, T, N, 3 * H] DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H DType* rt = gemmC2 + N * 3 * H; DType* zt = rt + N * H; DType* nt = zt + N * H; DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H; DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H; DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + 3 * H * 2 : nullptr; DType* back_bh_ptr = (bh_ptr != nullptr) ? 
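  /* One direction packs its gates as [r, z, n] blocks of width H, i.e. 3*H
     input biases plus 3*H hidden biases; the reverse direction's bias
     pointers therefore start 3*H*2 elements past the forward ones: */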
bh_ptr + 3 * H * 2 : nullptr; DType* back_gemmC1 = gemmC1 + T * N * 3 * H; DType* gemmC1_t = gemmC1; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (D == 1) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * H + j] = hx[i][j]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * D * H + j] = hx[i][j]; back_ht_1[i * D * H + j] = hx[N + i][j]; } } Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H)); Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H)); Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H)); // x * wx.T : [T * N, I] * [I, 3 * H] DType alpha = 1.0; DType beta = 0.0; linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true); if (D == 2) { linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true); } for (index_t t = 0; t < T; t++) { // perform the first direction, X * wx and H * wh for each step // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H] Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H)); if (D == 1) { linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true); } else { Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N)); linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true); } gemmC1_t = gemmC1 + t * N * 3 * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t rtb = i * 3 * H; index_t ztb = i * 3 * H + H; index_t ntb = i * 3 * H + 2 * H; rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + bx[0][j] + bh[0][j]); zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + bx[1][j] + bh[1][j]); nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j])); ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * ht_1[i * D * H + j]; } } ht_1 = ht; ht = ht + D * H * N; // perform the second direction if (D == 2) { gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H; Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H)); Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N)); linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t rtb = i * 3 * H; index_t ztb = i * 3 * H + H; index_t ntb = i * 3 * H + 2 * H; rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]); zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]); nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j])); back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * 
back_ht_1[i * D * H + j]; } } back_ht_1 = back_ht; back_ht = back_ht - D * H * N; } } // copy last state to hy, from(N, H * D) to (D, N, H) if (state_outputs) { if (D == 1) { DType* y_start = y_ptr + (T - 1) * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * H + j]; } } else { DType* y_start = y_ptr + (T - 1) * N * H * D; DType* y_back_start = y_ptr + H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * D * H + j]; hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j]; } } } } template <typename DType> void GruForwardInference(DType* ws, bool state_outputs, const int L, const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* y_ptr, DType* hy_ptr) { DType* wx = w_ptr; DType* wh = wx + I * H * 3; DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D; DType* bh = bx + H * 3; DType* y_tmp = ws; DType* y_l = x_ptr; DType* tmp_buf = y_tmp + D * T * N * H; DType* ws2 = y_tmp + D * T * N * H + D * H * N; DType* wx_l = wx; DType* wh_l = wh; DType* bx_l = bx; DType* bh_l = bh; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H)); DType* hy_l = hy_ptr; for (int l = 0; l < L; l++) { Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I)); if ((L + l) % 2) { y_l = y_ptr; } else { y_l = y_tmp; } Tensor<cpu, 2, DType> hx_l = hx[D * l]; GruForwardInferenceSingleLayer<DType>( ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l); hy_l = hy_l + D * N * H; bx_l = bx_l + 3 * H * D * 2; bh_l = bh_l + 3 * H * D * 2; wx_l = wx_l + I * H * 3 * D + H * H * 3 * D; if (l == 0) { I = D * H; } wh_l = wx_l + I * 3 * H; } } template <typename DType> void GruForwardTrainingSingleLayer(DType* ws, DType* tmp_buf, bool state_outputs, const int D, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, DType* wx_ptr, DType* wh_ptr, DType* bx_ptr, DType* bh_ptr, DType* gateR, DType* gateZ, DType* gateN, DType* Mnh, DType* y_ptr, DType* hy_ptr) { DType* ht = y_ptr; DType* ht_1 = y_ptr; DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H; DType* back_ht = back_ht_1; DType* gemmC1 = ws; // [D, T, N, 3 * H] DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H DType* rt = gateR; DType* zt = gateZ; DType* nt = gateN; DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H; DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H; DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + 3 * H * 2 : nullptr; DType* back_bh_ptr = (bh_ptr != nullptr) ? 
bh_ptr + 3 * H * 2 : nullptr; DType* back_gateR = gateR + T * N * H; DType* back_gateZ = gateZ + T * N * H; DType* back_gateN = gateN + T * N * H; DType* back_Mnh = Mnh + T * N * H; DType* back_gemmC1 = gemmC1 + T * N * 3 * H; DType* gemmC1_t = gemmC1; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (D == 1) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * H + j] = hx[i][j]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * D * H + j] = hx[i][j]; back_ht_1[i * D * H + j] = hx[N + i][j]; } } Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H)); Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H)); Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H)); // x * wx.T : [T * N, I] * [I, 3 * H] DType alpha = 1.0; DType beta = 0.0; linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true); if (D == 2) { linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true); } for (index_t t = 0; t < T; t++) { // perform the first direction, X * wx and H * wh for each step // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H] Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H)); if (D == 1) { linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true); } else { Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N)); linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true); } rt = gateR + t * N * H; zt = gateZ + t * N * H; nt = gateN + t * N * H; gemmC1_t = gemmC1 + t * N * 3 * H; DType* Mnht = Mnh + t * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t rtb = i * 3 * H; index_t ztb = i * 3 * H + H; index_t ntb = i * 3 * H + 2 * H; Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j]; rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + bx[0][j] + bh[0][j]); zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + bx[1][j] + bh[1][j]); nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j])); ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * ht_1[i * D * H + j]; } } ht_1 = ht; ht = ht + D * H * N; // perform the second direction if (D == 2) { rt = back_gateR + (T - 1 - t) * N * H; zt = back_gateZ + (T - 1 - t) * N * H; nt = back_gateN + (T - 1 - t) * N * H; gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H; Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H)); Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N)); linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true); DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { 
index_t rtb = i * 3 * H; index_t ztb = i * 3 * H + H; index_t ntb = i * 3 * H + 2 * H; back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j]; rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]); zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]); nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j])); back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * back_ht_1[i * D * H + j]; } } back_ht_1 = back_ht; back_ht = back_ht - D * H * N; } } // copy last state to hy, from(N, H * D) to (D, N, H) if (state_outputs) { if (D == 1) { DType* y_start = y_ptr + (T - 1) * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * H + j]; } } else { DType* y_start = y_ptr + (T - 1) * N * H * D; DType* y_back_start = y_ptr + H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * D * H + j]; hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j]; } } } } template <typename DType> void GruForwardTraining(DType* ws, DType* rs, bool state_outputs, const int L, const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* y_ptr, DType* hy_ptr, const float dropout, std::mt19937& rnd_engine) { // NOLINT(runtime/references) DType* wx = w_ptr; DType* wh = wx + I * H * 3; DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D; DType* bh = bx + H * 3; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H)); DType* hy_l = hy_ptr; DType* gateR_l = rs; DType* gateZ_l = gateR_l + L * T * D * N * H; DType* gateN_l = gateZ_l + L * T * D * N * H; DType* y_l = gateN_l + L * T * D * N * H; DType* Mnh_l = y_l + L * T * N * H * D; DType* dropout_random = Mnh_l + L * D * T * N * H; DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H; DType* ws2 = tmp_buf + D * N * H; DType* wx_l = wx; DType* wh_l = wh; DType* bx_l = bx; DType* bh_l = bh; DType* y_tmp = x_ptr; for (int l = 0; l < L; l++) { if (l != 0) { y_tmp = y_l; y_l = y_l + T * N * H * D; } if (dropout > 0.0f && l > 0) { std::uniform_real_distribution<float> distribution(0, 1); for (index_t i = 0; i < T * N * I; i++) { if (distribution(rnd_engine) < dropout) { dropout_random[(l - 1) * T * N * I + i] = 0; y_tmp[i] = 0; } else { dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout; y_tmp[i] = y_tmp[i] / (1.0f - dropout); } } } Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I)); Tensor<cpu, 2, DType> hx_l = hx[D * l]; GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l); gateR_l = gateR_l + T * D * N * H; gateZ_l = gateZ_l + T * D * N * H; gateN_l = gateN_l + T * D * N * H; Mnh_l = Mnh_l + T * D * N * H; hy_l = hy_l + D * N * H; bx_l = bx_l + 3 * H * D * 2; bh_l = bh_l + 3 * H * D * 2; wx_l = wx_l + I * H * 3 * D + H * H * 3 * D; if (l == 0) { I = D * H; } wh_l = wx_l + I * 3 * H; } const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * H * D; ++i) { y_ptr[i] = y_l[i]; } } template <typename DType> void GruBackwardSingleLayer(DType* ws, DType* tmp_buf, const int D, const index_t T, const index_t N, const 
index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, DType* wx_ptr, DType* wh_ptr, DType* y_ptr, DType* dy_ptr, DType* dhy_ptr, DType* gateR, DType* gateZ, DType* gateN, DType* Mnh, DType* dx, DType* dhx, DType* dwx, DType* dwh, DType* dbx, DType* dbh, int req_data, int req_params, int req_state) { DType* dyt; DType* ht1; // [N, D, H] DType* rt; DType* zt; DType* nt; DType* dat; DType* dart; DType* dar = ws; // [T, N, 3 * H] DType* da = dar + T * N * 3 * H; // [T, N, 3 * H] DType* dht1 = da + T * N * 3 * H; // [D, N, H] DType* hx_ = dht1 + D * N * H; // [N, D, H] DType* Mnht = Mnh; DType* back_ht1; DType* back_dht1 = dht1 + N * H; // [N, H] DType* back_Mnht = Mnh + T * N * H; DType* back_gateR = gateR + T * N * H; DType* back_gateZ = gateZ + T * N * H; DType* back_gateN = gateN + T * N * H; DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H; DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H; DType* back_dwx = dwx + I * 3 * H + H * 3 * H; DType* back_dwh = dwh + I * 3 * H + H * 3 * H; DType* back_dbx = dbx + 3 * H * 2; DType* back_dbh = dbh + 3 * H * 2; DType alpha = 1.0; DType beta = 0.0; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (req_params != kNullOp && req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < D * H * 3 * H; ++i) { dwh[i] = 0; } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < D * 3 * H; ++i) { dbx[i] = 0; dbh[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H; ++i) { if (dhy_ptr) { dht1[i] = dhy_ptr[i]; } else { dht1[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { hx_[i * D * H + j] = hx[i][j]; } } if (D == 2) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H; ++i) { if (dhy_ptr) { back_dht1[i] = dhy_ptr[N * H + i]; } else { back_dht1[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { hx_[i * D * H + H + j] = hx[N + i][j]; } } } for (index_t t = T - 1; t >= 0; --t) { if (t) { ht1 = y_ptr + (t - 1) * N * D * H; } else { ht1 = hx_; } // add dy[T, N, D, H] to dhy[D, N, H] dyt = dy_ptr + t * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { dht1[i * H + j] += dyt[i * D * H + j]; } } rt = gateR + t * N * H; zt = gateZ + t * N * H; nt = gateN + t * N * H; Mnht = Mnh + t * N * H; dat = da + t * N * 3 * H; dart = dar + t * N * 3 * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { int nid = i * 3 * H + 2 * H + j; int zid = i * 3 * H + H + j; int rid = i * 3 * H + j; int id = i * H + j; dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]); dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) * zt[id] * (1 - zt[id]); dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] * (1 - rt[id]); dart[nid] = dat[nid] * rt[id]; dht1[id] = dht1[id] * zt[id]; } } if (req_params != kNullOp) { alpha = 1.0; beta = 1.0; // dht1 = dart * wh [N, H] = [N, 3 * H] * [3 * H, H] Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H)); Tensor<cpu, 2, 
DType> d_dart(dart, Shape2(N, 3 * H)); linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false); if (req_params == kAddTo) { beta = 2.0; // dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H)); Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I)); linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false); } // dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H] Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H)); Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H)); Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N)); linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true); } } if (req_params != kNullOp) { // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H] if (req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { for (index_t j = 0; j < N * T; ++j) { dbx[i] += da[j * 3 * H + i]; dbh[i] += dar[j * 3 * H + i]; } } } else { const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T)); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < H * T * 3; ++i) { tmp_dbx.dptr_[i] = 0; tmp_dbh.dptr_[i] = 0; } for (index_t t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { for (index_t j = 0; j < N; ++j) { tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i]; tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i]; } } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { dbx[i] += tmp_dbx[i][t] + dbx[i]; dbh[i] += tmp_dbh[i][t] + dbh[i]; } } } } alpha = 1.0; beta = 0.0; // dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I] Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H)); if (req_data != kNullOp) { Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I)); linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false); } // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I] if (req_params != kNullOp && req_params != kAddTo) { Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I)); linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false); } if (D == 2) { for (index_t t = 0; t < T; ++t) { if (t == T - 1) { back_ht1 = hx_; } else { back_ht1 = y_ptr + (t + 1) * N * D * H; } // add dy[T, N, D, H] to dhy[D, N, H] dyt = dy_ptr + t * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { back_dht1[i * H + j] += dyt[i * D * H + H + j]; } } rt = back_gateR + t * N * H; zt = back_gateZ + t * N * H; nt = back_gateN + t * N * H; back_Mnht = Mnh + (T + t) * N * H; dat = da + t * N * 3 * H; dart = dar + t * N * 3 * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t nid = i * 3 * H + 2 * H + j; index_t zid = i * 3 * H + H + j; index_t rid = i * 3 * H + j; index_t id = i * H + j; dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]); dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] - nt[id]) * zt[id] * (1 - zt[id]); dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] * (1 - rt[id]); dart[nid] = dat[nid] * rt[id]; back_dht1[id] = back_dht1[id] * zt[id]; } } if (req_params != kNullOp) { alpha = 1.0; beta = 1.0; // dht1 = da * wh [N, H] = [N, 3 * H] * [3 * H, H] 
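      // beta = 1.0 accumulates into back_dht1, which already carries the
      // z-gated part of the hidden gradient (back_dht1 *= zt above); the GEMM
      // adds the part flowing back through the packed [r, z, n] gate block.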
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H)); Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H)); linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false); // dwh = da.T * ht1 [3 * H, H] = [3 * H, N] * [N, H] Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H)); Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H)); Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N)); if (req_params == kAddTo) { beta = 2.0; // dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H)); Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I)); linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false); } linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true); } } if (req_params != kNullOp) { // dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H] if (req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { for (index_t j = 0; j < N * T; ++j) { back_dbx[i] += da[j * 3 * H + i]; back_dbh[i] += dar[j * 3 * H + i]; } } } else { const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T)); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < H * T * 3; ++i) { tmp_dbx.dptr_[i] = 0; tmp_dbh.dptr_[i] = 0; } for (index_t t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { for (index_t j = 0; j < N; ++j) { tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i]; tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i]; } } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 3 * H; ++i) { back_dbx[i] += tmp_dbx[i][t] + back_dbx[i]; back_dbh[i] += tmp_dbh[i][t] + back_dbh[i]; } } } } alpha = 1.0; beta = 1.0; // dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I] Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H)); if (req_data != kNullOp) { Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I)); linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false); } alpha = 1.0; beta = 0.0; // dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I] if (req_params != kNullOp && req_params != kAddTo) { Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I)); linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false); } } if (req_state != kNullOp) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H * D; ++i) { dhx[i] = dht1[i]; } } } template <typename DType> void GruBackward(DType* ws, DType* rs, const int L, const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* dy_ptr, DType* dhy_ptr, DType* dx_ptr, DType* dhx_ptr, DType* dw_ptr, int req_data, int req_params, int req_state, const float dropout) { DType* wx = w_ptr; DType* dwx = dw_ptr; DType* dwh = dwx + I * H * 3; DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D; DType* gateR_l = rs + (L - 1) * T * D * N * H; DType* gateZ_l = gateR_l + L * T * D * N * H; DType* gateN_l = gateZ_l + L * T * D * N * H; DType* y_l = gateN_l + L * T * D * N * H; DType* Mnh_l = y_l + L * T * N * H * D; DType* dropout_random = Mnh_l + L * D * T * N * H; DType* tmp_buf = dropout_random + (L - 1) * D * T * N * 
H; DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2; DType* ws2 = dx_l + T * N * D * H; DType* wx_l = (L == 1) ? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H + D * I * 3 * H + D * H * 3 * H; DType* wh_l = wx_l; if (L == 1) { wh_l = wh_l + I * H * 3; } else { wh_l = wh_l + (D * H) * H * 3; } DType* dhy_l = nullptr; if (dhy_ptr) dhy_l = dhy_ptr + (L - 1) * D * N * H; DType* dwx_l = (L == 1) ? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H + D * I * 3 * H + D * H * 3 * H; DType* dwh_l = nullptr; if (L == 1) { dwh_l = dwx_l + I * H * 3; } else { dwh_l = dwx_l + (D * H) * H * 3; } DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2; DType* dbh_l = dbx_l + 3 * H; DType* dhx_l = dhx_ptr + (L - 1) * D * N * H; DType* dy_l = dy_ptr; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H)); index_t inputsize = I; DType* y_tmp = y_l - T * N * H * D; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (int l = L - 1; l >= 0; --l) { if (l == 0) { I = inputsize; y_tmp = x_ptr; dx_l = dx_ptr; } else { I = D * H; } Tensor<cpu, 2, DType> hx_l = hx[l]; Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I)); GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l, dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l, dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state); if (dropout > 0.0f && l > 0 && req_data != kNullOp) { dropout_random = dropout_random - T * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * I; i++) { if (dropout_random[i] == 0) { dx_l[i] = 0; } else { dx_l[i] = dx_l[i] / (1.0f - dropout); } } } if (l > 0) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * H * D; ++i) { dy_l[i] = dx_l[i]; } gateR_l = gateR_l - T * D * N * H; gateZ_l = gateZ_l - T * D * N * H; gateN_l = gateN_l - T * D * N * H; Mnh_l = Mnh_l - T * D * N * H; dhx_l = dhx_l - D * N * H; if (dhy_l) dhy_l = dhy_l - D * N * H; y_l = y_l - T * N * H * D; y_tmp = y_tmp - T * N * H * D; if (l == 1) { wx_l = wx_l - (inputsize + H) * H * 3 * D; wh_l = wx_l + inputsize * 3 * H; dwx_l = dwx_l - (inputsize + H) * H * 3 * D; dwh_l = dwx_l + inputsize * 3 * H; } else { wx_l = wx_l - (I + H) * H * 3 * D; wh_l = wx_l + I * 3 * H; dwx_l = dwx_l - (I + H) * H * 3 * D; dwh_l = dwx_l + I * 3 * H; } dbx_l = dbx_l - D * 3 * H * 2; dbh_l = dbx_l + 3 * H; } } } template <typename DType> void VanillaRNNForwardInferenceSingleLayer(DType* ws, DType* tmp_buf, bool state_outputs, const int D, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, DType* wx_ptr, DType* wh_ptr, DType* bx_ptr, DType* bh_ptr, DType* y_ptr, DType* hy_ptr, int mode) { DType* ht = y_ptr; DType* ht_1 = y_ptr; DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H; DType* back_ht = back_ht_1; DType* gemmC1 = ws; // [D, T, N, H] DType* gemmC2 = gemmC1 + D * T * N * H; // N * H DType* back_wx_ptr = wx_ptr + I * H + H * H; DType* back_wh_ptr = wh_ptr + I * H + H * H; DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + H * 2 : nullptr; DType* back_bh_ptr = (bh_ptr != nullptr) ? 
bh_ptr + H * 2 : nullptr; DType* back_gemmC1 = gemmC1 + T * N * H; DType* gemmC1_t = gemmC1; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H)); const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H)); const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H)); const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H)); const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (D == 1) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * H + j] = hx[i][j]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * D * H + j] = hx[i][j]; back_ht_1[i * D * H + j] = hx[N + i][j]; } } Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H)); Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H)); Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H)); // x * wx.T : [T * N, I] * [I, H] DType alpha = 1.0; DType beta = 0.0; linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true); if (D == 2) { linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true); } for (index_t t = 0; t < T; t++) { // perform the first direction, X * wx and H * wh for each step // ht-1 * wh, ht-1:[N, H] wh:[H, H] Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H)); if (D == 1) { linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true); } else { Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N)); linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true); } gemmC1_t = gemmC1 + t * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t tb = i * H; if (mode == 1) { ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]); } else { ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]); } } } ht_1 = ht; ht = ht + D * H * N; // perform the second direction if (D == 2) { gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H; Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H)); Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N)); linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t tb = i * H; if (mode == 1) { back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]); } else { back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]); } } } back_ht_1 = back_ht; back_ht = back_ht - D * H * N; } } // copy last state to hy, from(N, H * D) to (D, N, H) if (state_outputs) { if (D == 1) { DType* y_start = y_ptr + (T - 1) * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * H + j]; } } else { DType* y_start = y_ptr + (T - 1) * N * H * D; DType* y_back_start = y_ptr + H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; 
i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * D * H + j]; hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j]; } } } } template <typename DType> void VanillaRNNForwardInference(DType* ws, bool state_outputs, const int L, const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* y_ptr, DType* hy_ptr, int mode) { DType* wx = w_ptr; DType* wh = wx + I * H; DType* bx = wh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D; DType* bh = bx + H; DType* y_tmp = ws; DType* y_l = x_ptr; DType* tmp_buf = y_tmp + D * T * N * H; DType* ws2 = y_tmp + D * T * N * H + D * H * N; DType* wx_l = wx; DType* wh_l = wh; DType* bx_l = bx; DType* bh_l = bh; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H)); DType* hy_l = hy_ptr; for (int l = 0; l < L; l++) { Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I)); if ((L + l) % 2) { y_l = y_ptr; } else { y_l = y_tmp; } Tensor<cpu, 2, DType> hx_l = hx[D * l]; VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l, mode); hy_l = hy_l + D * N * H; bx_l = bx_l + H * D * 2; bh_l = bh_l + H * D * 2; wx_l = wx_l + I * H * D + H * H * D; if (l == 0) { I = D * H; } wh_l = wx_l + I * H; } } template <typename DType> void VanillaRNNForwardTrainingSingleLayer(DType* ws, DType* tmp_buf, bool state_outputs, const int D, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, DType* wx_ptr, DType* wh_ptr, DType* bx_ptr, DType* bh_ptr, DType* gateN, DType* y_ptr, DType* hy_ptr, int mode) { DType* ht = y_ptr; DType* ht_1 = y_ptr; DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H; DType* back_ht = back_ht_1; DType* gemmC1 = ws; // [D, T, N, H] DType* gemmC2 = gemmC1 + D * T * N * H; // N * H DType* nt = gateN; DType* back_wx_ptr = wx_ptr + I * H + H * H; DType* back_wh_ptr = wh_ptr + I * H + H * H; DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + H * 2 : nullptr; DType* back_bh_ptr = (bh_ptr != nullptr) ? 
bh_ptr + H * 2 : nullptr; DType* back_gateN = gateN + T * N * H; DType* back_gemmC1 = gemmC1 + T * N * H; DType* gemmC1_t = gemmC1; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H)); const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H)); const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H)); const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H)); const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (D == 1) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * H + j] = hx[i][j]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * D * H + j] = hx[i][j]; back_ht_1[i * D * H + j] = hx[N + i][j]; } } Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H)); Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H)); Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H)); // x * wx.T : [T * N, I] * [I, H] DType alpha = 1.0; DType beta = 0.0; linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true); if (D == 2) { linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true); } for (index_t t = 0; t < T; t++) { // perform the first direction, X * wx and H * wh for each step // ht-1 * wh, ht-1:[N, H] wh:[H, H] Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H)); if (D == 1) { linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true); } else { Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N)); linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true); } nt = gateN + t * N * H; gemmC1_t = gemmC1 + t * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t tb = i * H; if (mode == 1) { nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]); } else { nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]; ht[i * D * H + j] = relu(nt[tb + j]); } } } ht_1 = ht; ht = ht + D * H * N; // perform the second direction if (D == 2) { nt = back_gateN + (T - 1 - t) * N * H; gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H; Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H)); Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N)); linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t tb = i * H; if (mode == 1) { nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]); } else { nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]; back_ht[i * D * H + j] = relu(nt[tb + j]); } } } back_ht_1 = back_ht; back_ht = back_ht - D * H * N; } } // copy last state to hy, from(N, H * D) to (D, N, H) if (state_outputs) { if (D == 1) { DType* y_start = y_ptr + (T - 1) * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = 
y_start[i * H + j]; } } else { DType* y_start = y_ptr + (T - 1) * N * H * D; DType* y_back_start = y_ptr + H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * D * H + j]; hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j]; } } } } template <typename DType> void VanillaRNNForwardTraining(DType* ws, DType* rs, bool state_outputs, const int L, const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* y_ptr, DType* hy_ptr, const float dropout, int mode, std::mt19937& rnd_engine) { // NOLINT(runtime/references) DType* wx = w_ptr; DType* wh = wx + I * H; DType* bx = wh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D; DType* bh = bx + H; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H)); DType* hy_l = hy_ptr; DType* gateN_l = rs; DType* y_l = gateN_l + L * T * D * N * H; DType* dropout_random = y_l + L * D * T * N * H; DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H; DType* ws2 = tmp_buf + D * N * H; DType* wx_l = wx; DType* wh_l = wh; DType* bx_l = bx; DType* bh_l = bh; DType* y_tmp = x_ptr; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (int l = 0; l < L; l++) { if (l != 0) { y_tmp = y_l; y_l = y_l + T * N * H * D; } if (dropout > 0.0f && l > 0) { std::uniform_real_distribution<float> distribution(0, 1); for (index_t i = 0; i < T * N * I; i++) { if (distribution(rnd_engine) < dropout) { dropout_random[(l - 1) * T * N * I + i] = 0; y_tmp[i] = 0; } else { dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout; y_tmp[i] = y_tmp[i] / (1.0f - dropout); } } } Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I)); Tensor<cpu, 2, DType> hx_l = hx[D * l]; VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, gateN_l, y_l, hy_l, mode); gateN_l = gateN_l + T * D * N * H; hy_l = hy_l + D * N * H; bx_l = bx_l + H * D * 2; bh_l = bh_l + H * D * 2; wx_l = wx_l + I * H * D + H * H * D; if (l == 0) { I = D * H; } wh_l = wx_l + I * H; } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * H * D; ++i) { y_ptr[i] = y_l[i]; } } template <typename DType> void VanillaRNNBackwardSingleLayer(DType* ws, DType* tmp_buf, const int D, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType>& x, const Tensor<cpu, 2, DType>& hx, DType* wx_ptr, DType* wh_ptr, DType* y_ptr, DType* dy_ptr, DType* dhy_ptr, DType* gateN, DType* dx, DType* dhx, DType* dwx, DType* dwh, DType* dbx, DType* dbh, int req_data, int req_params, int req_state, int mode) { DType* dyt; DType* ht1; // [N, D, H] DType* dart; DType* nt; DType* dar = ws; // [T, N, H] DType* dht1 = dar + T * N * H; // [D, N, H] DType* hx_ = dht1 + D * N * H; // [N, D, H] DType* back_ht1; DType* back_dht1 = dht1 + N * H; // [N, H] DType* back_gateN = gateN + T * N * H; DType* back_wx_ptr = wx_ptr + I * H + H * H; DType* back_wh_ptr = wh_ptr + I * H + H * H; DType* back_dwx = dwx + I * H + H * H; DType* back_dwh = dwh + I * H + H * H; DType* back_dbx = dbx + H * 2; DType* back_dbh = dbh + H * 2; DType alpha = 1.0; DType beta = 0.0; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H)); const int omp_threads = 
mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (req_params != kNullOp && req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < D * H * H; ++i) { dwh[i] = 0; } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < D * H; ++i) { dbx[i] = 0; dbh[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H; ++i) { if (dhy_ptr) { dht1[i] = dhy_ptr[i]; } else { dht1[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { hx_[i * D * H + j] = hx[i][j]; } } if (D == 2) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H; ++i) { if (dhy_ptr) { back_dht1[i] = dhy_ptr[N * H + i]; } else { back_dht1[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { hx_[i * D * H + H + j] = hx[N + i][j]; } } } for (index_t t = T - 1; t >= 0; --t) { if (t) { ht1 = y_ptr + (t - 1) * N * D * H; } else { ht1 = hx_; } // add dy[T, N, D, H] to dhy[D, N, H] dyt = dy_ptr + t * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { dht1[i * H + j] += dyt[i * D * H + j]; } } nt = gateN + t * N * H; dart = dar + t * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t id = i * H + j; if (mode == 1) { dart[id] = dht1[id] * (1 - nt[id] * nt[id]); } else { dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f; } dht1[id] = 0; } } if (req_params != kNullOp) { alpha = 1.0; beta = 1.0; // dht1 = dart * wh [N, H] = [N, H] * [H, H] Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H)); Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H)); linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false); if (req_params == kAddTo) { beta = 2.0; // dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I)); linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false); } // dwh = dart.T * ht1 [H, H] = [H, N] * [N, H] Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H)); Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H)); Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N)); linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true); } } if (req_params != kNullOp) { // dbx = e * da [1, H] = [1, N] * [N, H] if (req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { for (index_t j = 0; j < N * T; ++j) { dbx[i] += dar[j * H + i]; dbh[i] = dbx[i]; } } } else { const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T)); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < H * T; ++i) { tmp_dbx.dptr_[i] = 0; tmp_dbh.dptr_[i] = 0; } for (index_t t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { for (index_t j = 0; j < N; ++j) { tmp_dbx[i][t] += dar[t * N * H + j * H + i]; tmp_dbh[i][t] = tmp_dbx[i][t]; } } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { dbx[i] += tmp_dbx[i][t] + dbx[i]; dbh[i] = dbx[i]; } } } } alpha = 1.0; beta = 0.0; // dx = da * wx [T * N, I] = [T * N, H] * [H, I] 
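  // dx never feeds back into the recurrence, so the input gradient can be
  // produced by one GEMM fused over all T steps (beta = 0 overwrites dx);
  // the reverse direction below reuses dx with beta = 1 to accumulate.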
Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H)); if (req_data != kNullOp) { Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I)); linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false); } // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I] if (req_params != kNullOp && req_params != kAddTo) { Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I)); linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false); } if (D == 2) { for (index_t t = 0; t < T; ++t) { if (t == T - 1) { back_ht1 = hx_; } else { back_ht1 = y_ptr + (t + 1) * N * D * H; } // add dy[T, N, D, H] to dhy[D, N, H] dyt = dy_ptr + t * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { back_dht1[i * H + j] += dyt[i * D * H + H + j]; } } nt = back_gateN + t * N * H; dart = dar + t * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t id = i * H + j; if (mode == 1) { dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]); } else { dart[id] = nt[id] > 0.0f ? static_cast<float>(back_dht1[id]) : 0.0f; } back_dht1[id] = 0; } } if (req_params != kNullOp) { alpha = 1.0; beta = 1.0; // dht1 = da * wh [N, H] = [N, H] * [H, H] Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H)); Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H)); linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false); // dwh = da.T * ht1 [H, H] = [H, N] * [N, H] Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H)); Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H)); Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N)); if (req_params == kAddTo) { beta = 2.0; // dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I)); linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false); } linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true); } } if (req_params != kNullOp) { // dbx = e * da [1, H] = [1, N] * [N, H] if (req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { for (index_t j = 0; j < N * T; ++j) { back_dbx[i] += dar[j * H + i]; back_dbh[i] = back_dbx[i]; } } } else { const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T)); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < H * T; ++i) { tmp_dbx.dptr_[i] = 0; tmp_dbh.dptr_[i] = 0; } for (index_t t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { for (index_t j = 0; j < N; ++j) { tmp_dbx[i][t] += dar[t * N * H + j * H + i]; tmp_dbh[i][t] = tmp_dbx[i][t]; } } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { back_dbx[i] += tmp_dbx[i][t] + back_dbx[i]; back_dbh[i] = back_dbx[i]; } } } } alpha = 1.0; beta = 1.0; // dxt = da * wx [T * N, I] = [T * N, H] * [H, I] Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H)); if (req_data != kNullOp) { Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I)); linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false); } alpha = 1.0; beta = 0.0; // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I] if (req_params != kNullOp && req_params != kAddTo) { Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I)); 
linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false); } } if (req_state != kNullOp) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H * D; ++i) { dhx[i] = dht1[i]; } } } template <typename DType> void VanillaRNNBackward(DType* ws, DType* rs, const int L, const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* dy_ptr, DType* dhy_ptr, DType* dx_ptr, DType* dhx_ptr, DType* dw_ptr, int req_data, int req_params, int req_state, const float dropout, int mode) { DType* wx = w_ptr; DType* dwx = dw_ptr; DType* dwh = dwx + I * H; DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D; DType* gateN_l = rs + (L - 1) * T * D * N * H; DType* y_l = gateN_l + L * T * D * N * H; DType* dropout_random = y_l + L * D * T * N * H; DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H; DType* dx_l = tmp_buf + T * N * D * H + H * T * 2; DType* ws2 = dx_l + T * N * D * H; DType* wx_l = (L == 1) ? wx : wx + (L - 2) * D * (D + 1) * H * H + D * I * H + D * H * H; DType* wh_l = wx_l; if (L == 1) { wh_l = wh_l + I * H; } else { wh_l = wh_l + (D * H) * H; } DType* dhy_l = nullptr; if (dhy_ptr) dhy_l = dhy_ptr + (L - 1) * D * N * H; DType* dwx_l = (L == 1) ? dwx : dwx + (L - 2) * D * (D + 1) * H * H + D * I * H + D * H * H; DType* dwh_l = nullptr; if (L == 1) { dwh_l = dwx_l + I * H; } else { dwh_l = dwx_l + (D * H) * H; } DType* dbx_l = dbx + (L - 1) * D * H * 2; DType* dbh_l = dbx_l + H; DType* dhx_l = dhx_ptr + (L - 1) * D * N * H; DType* dy_l = dy_ptr; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H)); index_t inputsize = I; DType* y_tmp = y_l - T * N * H * D; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (int l = L - 1; l >= 0; --l) { if (l == 0) { I = inputsize; y_tmp = x_ptr; dx_l = dx_ptr; } else { I = D * H; } Tensor<cpu, 2, DType> hx_l = hx[l]; Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I)); VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state, mode); if (dropout > 0.0f && l > 0 && req_data != kNullOp) { dropout_random = dropout_random - T * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * I; i++) { if (dropout_random[i] == 0) { dx_l[i] = 0; } else { dx_l[i] = dx_l[i] / (1.0f - dropout); } } } if (l > 0) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * H * D; ++i) { dy_l[i] = dx_l[i]; } gateN_l = gateN_l - T * D * N * H; dhx_l = dhx_l - D * N * H; if (dhy_l) dhy_l = dhy_l - D * N * H; y_l = y_l - T * N * H * D; y_tmp = y_l; if (l == 1) { wx_l = wx_l - (inputsize + H) * H * D; wh_l = wx_l + inputsize * H; dwx_l = dwx_l - (inputsize + H) * H * D; dwh_l = dwx_l + inputsize * H; } else { wx_l = wx_l - (I + H) * H * D; wh_l = wx_l + I * H; dwx_l = dwx_l - (I + H) * H * D; dwh_l = dwx_l + I * H; } dbx_l = dbx_l - D * H * 2; dbh_l = dbx_l + H; } } } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_RNN_IMPL_H_
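/*
 * A minimal standalone sketch of the inverted-dropout backward rule used in
 * VanillaRNNBackward above: gradients of units that were dropped in the
 * forward pass (saved mask value 0) are zeroed, and surviving gradients are
 * rescaled by 1/(1 - p). The function and parameter names here are
 * illustrative only, not part of the MXNet API.
 */
#include <stddef.h>

static void dropout_backward_sketch(float* dx, const float* mask,
                                    size_t n, float p) {
  for (size_t i = 0; i < n; ++i) {
    /* mask[i] == 0 marks a dropped unit; anything else was kept */
    dx[i] = (mask[i] == 0.0f) ? 0.0f : dx[i] / (1.0f - p);
  }
}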
27_omp_softcounter.c
// clang-format off
// RUN: %run %s --omp --call-filter 2>&1 | %filecheck %s --check-prefix=CHECK-TSAN
// RUN: %run %s -o -O2 --omp --call-filter 2>&1 | %filecheck %s --check-prefix=CHECK-TSAN
// RUN: %run %s -o -O2 --omp --call-filter 2>&1 | %filecheck %s
// RUN: %run %s --omp --call-filter 2>&1 | %filecheck %s
// REQUIRES: openmp && softcounter
// clang-format on

#include <stdlib.h>

void ptr(const int n) {
  // Sections can sometimes cause Max. Heap Allocs to be 1 (instead of more likely 2), if
  // thread execution order always frees one pointer before malloc of other.
#pragma omp parallel sections num_threads(2)
  {
#pragma omp section
    for (int i = 1; i <= n; i++) {
      double* d = (double*)malloc(sizeof(double) * n);
      free(d);
    }
#pragma omp section
    for (int i = 1; i <= n; i++) {
      double* e = (double*)malloc(2 * sizeof(double) * n);
      free(e);
    }
  }
}

int main(int argc, char** argv) {
  const int n = 100;
  ptr(n);

  // CHECK-TSAN-NOT: ThreadSanitizer

  // CHECK: [Trace] TypeART Runtime Trace
  // CHECK-NOT: [Error]
  // CHECK: Alloc Stats from softcounters
  // CHECK-NEXT: Total heap : 200 , 200 , -
  // CHECK-NEXT: Total stack : 0 , 0 , -
  // CHECK-NEXT: Total global : 0 , 0 , -
  // CHECK-NEXT: Max. Heap Allocs : {{[1-2]}} , - , -
  // CHECK-NEXT: Max. Stack Allocs : 0 , - , -
  // CHECK-NEXT: Addresses checked : 0 , - , -
  // CHECK-NEXT: Distinct Addresses checked : 0 , - , -
  // CHECK-NEXT: Addresses re-used : 0 , - , -
  // CHECK-NEXT: Addresses missed : 0 , - , -
  // CHECK-NEXT: Distinct Addresses missed : 0 , - , -
  // CHECK-NEXT: Total free heap : 200 , 200 , -
  // CHECK-NEXT: Total free stack : 0 , 0 , -
  // CHECK-NEXT: OMP Stack/Heap/Free : 0 , 200 , 200
  // CHECK-NEXT: Null/Zero/NullZero Addr : 0 , 0 , 0
  // CHECK-NEXT: User-def. types : 0 , - , -
  // CHECK-NEXT: Estimated memory use (KiB) : {{[0-9]+}} , - , -
  // CHECK-NEXT: Bytes per node map/stack : 96 , 8 , -
  // CHECK-NEXT: {{(#|-)+}}
  // CHECK-NEXT: Allocation type detail (heap, stack, global)
  // CHECK: {{(#|-)+}}
  // CHECK-NEXT: Free allocation type detail (heap, stack)
  // CHECK-NEXT: 6 : 200 , 0 , double
  // CHECK: Per-thread counter values (2 threads)
  // CHECK-NEXT: Thread Heap Allocs : 100 , 100
  // CHECK-NEXT: Thread Heap Arrays : 100 , 100
  // CHECK-NEXT: Thread Heap Allocs Free : 100 , 100
  // CHECK-NEXT: Thread Heap Arrays Free : 100 , 100
  // CHECK-NEXT: Thread Stack Allocs : 0 , 0
  // CHECK-NEXT: Thread Stack Arrays : 0 , 0
  // CHECK-NEXT: Thread Max. Stack Allocs : 0 , 0
  // CHECK-NEXT: Thread Stack Allocs Free : 0 , 0
  // CHECK-NEXT: Thread Stack Array Free : 0 , 0

  return 0;
}
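/*
 * A minimal sketch of the live-allocation high-water mark that explains the
 * {{[1-2]}} range checked for "Max. Heap Allocs" above: if the two sections'
 * malloc/free pairs ever overlap in time the peak is 2, and if one section
 * always frees before the other allocates it stays at 1. The counter names
 * below are illustrative, not TypeART internals.
 */
#include <stdlib.h>

static int live_allocs = 0;
static int max_live_allocs = 0;

static void* counted_malloc(size_t bytes) {
  void* p = malloc(bytes);
#pragma omp critical(alloc_counter)
  {
    if (p != NULL && ++live_allocs > max_live_allocs)
      max_live_allocs = live_allocs; /* ends up 1 or 2 for the test above */
  }
  return p;
}

static void counted_free(void* p) {
#pragma omp critical(alloc_counter)
  {
    if (p != NULL)
      --live_allocs;
  }
  free(p);
}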
Ccorrelation_p.c
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include "numpy/arrayobject.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* This program mainly comes from
   http://bioinfadmin.cs.ucl.ac.uk/downloads/PSICOV/psicov21.c
   I corrected a minor mistake and made the memory handling safer. */

#define FALSE 0
#define TRUE 1

#define SQR(x) ((x)*(x))
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))

#define MAXSEQLEN 5000
#define MINEFSEQS (seqlen)

/* Convert AA letter to numeric code (0-21) */
int aanum(int ch)
{
    const static int aacvs[] = {
        999, 0, 3, 4, 3, 6, 13, 7, 8, 9, 21, 11, 10, 12, 2,
        21, 14, 5, 1, 15, 16, 21, 19, 17, 21, 18, 6
    };

    return (isalpha(ch) ? aacvs[ch & 31] : 20);
}

/* Allocate matrix */
void* allocmat(int rows, int columns, int size)
{
    int i, j;
    void **p, *rp;

    rp = malloc(rows * sizeof(void *) + sizeof(int));
    if (rp == NULL)
        return NULL;

    *((int *)rp) = rows;
    p = rp + sizeof(int);

    for (i = 0; i < rows; i++)
        if ((p[i] = calloc(columns, size)) == NULL)
        {
            for (j = 0; j < i; j++)
                free(p[j]);
            /* free the block that was actually allocated: rp, which lies
               sizeof(int) bytes before p (pointer arithmetic on the void**
               p would step in units of void*, not bytes) */
            free(rp);
            return NULL;
        }

    return p;
}

/* Allocate vector */
void* allocvec(int columns, int size)
{
    void *p;

    p = calloc(columns, size);
    if (p == NULL)
        return NULL;

    return p;
}

/* LASSO */
#define EPS (1.1e-15)
#define BIG (1e9)

int glassofast(const int n, double **S, double **L, const double thr,
               const int maxit, int approxflg, int warm, double **X, double **W)
{
    /* This subroutine computes the L1 regularized covariance matrix estimate
       using the algorithm described in the paper:

       J. Friedman, T. Hastie, R. Tibshirani:
       Sparse inverse covariance estimation with the graphical lasso
       Biostatistics, 9(3):432-441, July 2008.

       This code is adapted from the Fortran code described in the following report:

       M. A. Sustik & B. Calderhead:
       GLASSOFAST: An efficient GLASSO implementation
       Technical Report TR-12-29, University of Texas at Austin

       NOTE that when multiple threads are used, we gain a huge time saving by
       avoiding full thread synchronisation when updating elements of the W
       (covariance) matrix. In multithreaded mode, the order of updates to the
       W matrix at each iteration will depend on the order in which threads
       complete. In practice, this hardly matters, because the algorithm is
       iterative, and in testing still converges to within 6 d.p. of the
       non-threaded code. If a very small degree of non-deterministic
       behaviour really worries you, then set the maximum number of threads
       to 1 (or compile without OpenMP). */

    int i, j, ii, iter, jj;
    double a, b, c, delta, dlx, dw, shr, sum, thrlasso, tmp,
           wd[MAXSEQLEN*21], wxj[MAXSEQLEN*21];

    for (shr = ii = 0; ii < n; ii++)
        for (jj = 0; jj < n; jj++)
            shr += fabs(S[ii][jj]);

    for (i = 0; i < n; i++)
        shr -= fabs(S[i][i]);

    if (shr == 0.0)
    {
        /* S is diagonal.
*/ for (ii=0; ii<n; ii++) for (jj=0; jj<n; jj++) W[ii][jj] = X[ii][jj] = 0.0; for (i=0; i<n; i++) W[i][i] = W[i][i] + L[i][i]; for (ii=0; ii<n; ii++) for (jj=0; jj<n; jj++) X[ii][jj] = 0.0; for (i=0; i<n; i++) X[i][i] = 1.0 / MAX(W[i][i], EPS); return 0; } shr *= thr/(n-1); thrlasso = shr/n; if (thrlasso < 2*EPS) thrlasso = 2*EPS; if (!warm) { for (ii=0; ii<n; ii++) for (jj=0; jj<n; jj++) { W[ii][jj] = S[ii][jj]; X[ii][jj] = 0.0; } } else { for (i=0; i<n; i++) { for (ii=0; ii<n; ii++) X[i][ii] = -X[i][ii]/X[i][i]; X[i][i] = 0.0; } } for (i=0; i<n; i++) { wd[i] = S[i][i] + L[i][i]; W[i][i] = wd[i]; } for (iter = 1; iter<=maxit; iter++) { dw = 0.0; #ifdef _OPENMP #pragma omp parallel for default(shared) private(i,j,ii,wxj,a,b,c,dlx,delta,sum) #endif for (j=0; j<n; j++) { for (ii=0; ii<n; ii++) wxj[ii] = 0.0; for (i=0; i<n; i++) if (X[j][i] != 0.0) for (ii=0; ii<n; ii++) wxj[ii] += W[i][ii] * X[j][i]; for (;;) { dlx = 0.0; for (i=0; i<n; i++) { if (i != j && L[j][i] < BIG) { a = S[j][i] - wxj[i] + wd[i] * X[j][i]; b = fabs(a) - L[j][i]; if (b <= 0.0) c = 0.0; else if (a >= 0.0) c = b / wd[i]; else c = -b / wd[i]; delta = c - X[j][i]; if (delta != 0.0 && (!approxflg || fabs(delta) > 1e-6)) { X[j][i] = c; for (ii=0; ii<n; ii++) wxj[ii] += W[i][ii] * delta; if (fabs(delta) > dlx) dlx = fabs(delta); } } } if (dlx < thrlasso) break; } wxj[j] = wd[j]; for (sum=ii=0; ii<n; ii++) sum += fabs(wxj[ii] - W[j][ii]); #ifdef _OPENMP #pragma omp critical #endif if (sum > dw) dw = sum; for (ii=0; ii<n; ii++) W[j][ii] = wxj[ii]; for (ii=0; ii<n; ii++) W[ii][j] = wxj[ii]; } if (dw <= shr) break; } for (i=0; i<n; i++) { for (sum=ii=0; ii<n; ii++) sum += X[i][ii] * W[i][ii]; tmp = 1.0 / (wd[i] - sum); for (ii=0; ii<n; ii++) X[i][ii] = -tmp * X[i][ii]; X[i][i] = tmp; } for (i=0; i<n-1; i++) { for (ii=i+1; ii<n; ii++) { X[i][ii] = 0.5 * (X[i][ii] + X[ii][i]); X[ii][i] = X[i][ii]; } } return iter; } /* Perform Cholesky decomposition on matrix */ int test_cholesky(double **a, const int n) { int i, j, k, status=0; double sum; static double *diag; if (diag == NULL) diag = (double *)allocvec(n, sizeof(double)); if (diag==NULL) return 2; for (i=0; i<n; i++) { if (!status) for (j=i; j<n; j++) { sum = a[i][j]; for (k=i-1; k >= 0; k--) sum -= a[i][k]*a[j][k]; if (i == j) { if (sum <= 0.0) status = 1; diag[i] = sqrt(sum); } else a[j][i] = sum / diag[i]; } } return status; } struct sc_entry { double sc; int i, j; } *sclist; static PyObject *msapsicov(PyObject *self, PyObject *args, PyObject *kwargs) { PyArrayObject *msa, *psicov; int approxflg, shrinkflg, overrideflg, rawscflg, apcflg, minseqsep, maxthread, pseudoc; double rhodefault, targfnzero, thresh, idthresh, maxgapf; char *blockfn = NULL; int *wtcount; double *weight, **pa, **pcmat, *pcsum; char **aln=NULL; int a, b, i, j, k, ndim, maxit=10000, initflg=0, npair, nnzero, ncon; double wtsum, smean, lambda, lastfnzero, trialrho, rfact, score, fnzero, pcmean, pc, scsum, scsumsq, mean, sd, zscore, ppv; FILE *ifp; static char *kwlist[] = {"msa", "psicov", "approxflg", "shrinkflg", "overrideflg", "rawscflg", "apcflg", "rhodefault", "targfnzero", "thresh", "idthresh", "pseudoc", "minseqsep", "blockfn", "maxgapf", "maxthread", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|iiiiiddddiisdi", kwlist, &msa, &psicov, &approxflg, &shrinkflg, &overrideflg, &rawscflg, &apcflg, &rhodefault,&targfnzero,&thresh,&idthresh, &pseudoc,&minseqsep, &blockfn, &maxgapf, &maxthread )) return NULL; /* make sure to have a contiguous and well-behaved array */ msa = 
PyArray_GETCONTIGUOUS(msa);

    /* check dimensions */
    long nseqs = PyArray_DIMS(msa)[0], seqlen = PyArray_DIMS(msa)[1];

    /* get pointers to data */
    char *seq = (char *) PyArray_DATA(msa);   /* size: number x length */
    double *mut = (double *) PyArray_DATA(psicov);

    /* set the thread count only when OpenMP is enabled */
#ifdef _OPENMP
    if (maxthread != -1)
        omp_set_num_threads(maxthread);
#endif

    aln = (char **)allocvec(nseqs, sizeof(char *));
    if (aln == NULL) {
        return Py_BuildValue("Oi", Py_None, 1);
    }
    weight = (double *)allocvec(nseqs, sizeof(double));
    if (weight == NULL) {
        free(aln);
        return Py_BuildValue("Oi", Py_None, 1);
    }
    wtcount = (int *)allocvec(nseqs, sizeof(int));
    if (wtcount == NULL) {
        free(aln);
        free(weight);
        return Py_BuildValue("Oi", Py_None, 1);
    }

    if (!(aln[0] = (char *)malloc(seqlen))) {
        free(aln);
        free(weight);
        free(wtcount);
        return Py_BuildValue("Oi", Py_None, 1);
    }
    for (j = 0; j < seqlen; j++)
        aln[0][j] = aanum(seq[j]);

    for (i = 1; i < nseqs; i++)
    {
        if (!(aln[i] = (char *)malloc(seqlen))) {
            for (j = 0; j < i; j++)
                free(aln[j]);
            free(aln);
            free(weight);
            free(wtcount);
            return Py_BuildValue("Oi", Py_None, 1);
        }
        for (j = 0; j < seqlen; j++)
            aln[i][j] = aanum(seq[i*seqlen+j]);
    }

    /* Calculate sequence weights (use openMP/pthreads if available) */
    if (idthresh < 0.0)
    {
        double meanfracid = 0.0;

#ifdef _OPENMP
#pragma omp parallel for default(shared) private(j,k) reduction(+:meanfracid)
#endif
        for (i = 0; i < nseqs; i++)
            for (j = i + 1; j < nseqs; j++)
            {
                int nids;
                double fracid;

                for (nids = k = 0; k < seqlen; k++)
                    nids += (aln[i][k] == aln[j][k]);

                fracid = (double)nids / seqlen;
                meanfracid += fracid;
            }

        meanfracid /= 0.5 * nseqs * (nseqs - 1.0);
        idthresh = MIN(0.6, 0.38 * 0.32 / meanfracid);
    }

#ifdef _OPENMP
#pragma omp parallel for default(shared) private(j,k)
#endif
    for (i = 0; i < nseqs; i++)
        for (j = i + 1; j < nseqs; j++)
        {
            int nthresh = (int)(seqlen * idthresh);

            for (k = 0; nthresh > 0 && k < seqlen; k++)
                nthresh -= (aln[i][k] != aln[j][k]);

            if (nthresh > 0)
            {
#ifdef _OPENMP
#pragma omp critical
#endif
                {
                    wtcount[i]++;
                    wtcount[j]++;
                }
            }
        }

    for (wtsum = i = 0; i < nseqs; i++)
        wtsum += (weight[i] = 1.0 / (1 + wtcount[i]));

    if (wtsum < seqlen && !overrideflg) {
        for (j = 0; j < nseqs; j++)
            free(aln[j]);
        free(aln);
        free(weight);
        free(wtcount);
        return Py_BuildValue("Oid", Py_None, 2, wtsum);
    }

    pa = (double **)allocmat(seqlen, 21, sizeof(double));
    if (pa == NULL) {
        for (j = 0; j < nseqs; j++)
            free(aln[j]);
        free(aln);
        free(weight);
        free(wtcount);
        return Py_BuildValue("Oi", Py_None, 1);
    }

    /* Calculate singlet frequencies with pseudocount */
    for (i = 0; i < seqlen; i++)
    {
        for (a = 0; a < 21; a++)
            pa[i][a] = pseudoc;

        for (k = 0; k < nseqs; k++)
        {
            a = aln[k][i];
            if (a < 21)
                pa[i][a] += weight[k];
        }

        for (a = 0; a < 21; a++)
            pa[i][a] /= pseudoc * 21.0 + wtsum;
    }

    double **cmat, **rho, **ww, **wwi, **tempmat;

    ndim = seqlen * 21;

    cmat = (double **)allocmat(ndim, ndim, sizeof(double));
    if (cmat == NULL) {
        for (j = 0; j < nseqs; j++)
            free(aln[j]);
        free(aln);
        free(weight);
        free(wtcount);
        for (j = 0; j < seqlen; j++)
            free(pa[j]);
        free((void *)pa - sizeof(int));
        return Py_BuildValue("Oi", Py_None, 1);
    }
    tempmat = (double **)allocmat(ndim, ndim, sizeof(double));
    if (tempmat == NULL) {
        for (j = 0; j < nseqs; j++)
            free(aln[j]);
        free(aln);
        free(weight);
        free(wtcount);
        for (j = 0; j < seqlen; j++)
            free(pa[j]);
        free((void *)pa - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(cmat[j]);
        free((void *)cmat - sizeof(int));
        return Py_BuildValue("Oi", Py_None, 1);
    }

    /* Form the covariance matrix */
#ifdef _OPENMP
#pragma omp parallel for default(shared) private(j,k,a,b)
#endif
    for (i = 0; i < seqlen; i++)
        for (j = i; j < seqlen; j++)
        {
            double pab[21][21];

            for (a = 0; a < 21; a++)
                for (b = 0; b < 21; b++)
                    if (i == j)
                        pab[a][b]
= (a == b) ? pa[i][a] : 0.0; else pab[a][b] = pseudoc / 21.0; if (i != j) { for (k=0; k<nseqs; k++) { a = aln[k][i]; b = aln[k][j]; if (a < 21 && b < 21) pab[a][b] += weight[k]; } for (a=0; a<21; a++) for (b=0; b<21; b++) pab[a][b] /= pseudoc * 21.0 + wtsum; } for (a=0; a<21; a++) for (b=0; b<21; b++) if (i != j || a == b) cmat[i*21+a][j*21+b] = cmat[j*21+b][i*21+a] = pab[a][b] - pa[i][a] * pa[j][b]; } /* Shrink sample covariance matrix towards shrinkage target F = Diag(1,1,1,...,1) * smean */ int checkflag=0; if (shrinkflg) { for (smean=i=0; i<ndim; i++) smean += cmat[i][i]; smean /= (double)ndim; lambda = 0.2; for (;;) { for (i=0; i<ndim; i++) memcpy(tempmat[i], cmat[i], ndim*sizeof(double)); /* Test if positive definite using Cholesky decomposition */ int testc=test_cholesky(tempmat, ndim); if (testc==2){ checkflag=1; break; } if (!testc) break; #ifdef _OPENMP #pragma omp parallel for default(shared) private(j,a,b) #endif for (i=0; i<seqlen; i++) for (j=0; j<seqlen; j++) for (a=0; a<21; a++) for (b=0; b<21; b++) if (i != j) cmat[i*21+a][j*21+b] *= 1.0 - lambda; else if (a == b) cmat[i*21+a][j*21+b] = smean * lambda + (1.0 - lambda) * cmat[i*21+a][j*21+b]; } } if (checkflag){ for (j=0;j<nseqs;j++) free(aln[j]); free(aln); free(weight); free(wtcount); for (j=0;j<seqlen;j++) free(pa[j]); free((void *)pa - sizeof(int)); for (j=0;j<ndim;j++) free(cmat[j]); free((void *)cmat - sizeof(int)); for(j=0;j<ndim;j++) free(tempmat[j]); free((void *)tempmat - sizeof(int)); return Py_BuildValue("Oi", Py_None, 1); } rho = (double **)allocmat(ndim, ndim, sizeof(double)); if (rho==NULL){ for (j=0;j<nseqs;j++) free(aln[j]); free(aln); free(weight); free(wtcount); for (j=0;j<seqlen;j++) free(pa[j]); free((void *)pa - sizeof(int)); for (j=0;j<ndim;j++) free(cmat[j]); free((void *)cmat - sizeof(int)); for(j=0;j<ndim;j++) free(tempmat[j]); free((void *)tempmat - sizeof(int)); return Py_BuildValue("Oi", Py_None, 1); } ww = (double **)allocmat(ndim, ndim, sizeof(double)); if (ww==NULL){ for (j=0;j<nseqs;j++) free(aln[j]); free(aln); free(weight); free(wtcount); for (j=0;j<seqlen;j++) free(pa[j]); free((void *)pa - sizeof(int)); for (j=0;j<ndim;j++) free(cmat[j]); free((void *)cmat - sizeof(int)); for(j=0;j<ndim;j++) free(tempmat[j]); free((void *)tempmat - sizeof(int)); for(j=0;j<ndim;j++) free(rho[j]); free((void *)rho - sizeof(int)); return Py_BuildValue("Oi", Py_None, 1); } wwi = (double **)allocmat(ndim, ndim, sizeof(double)); if (wwi==NULL){ for (j=0;j<nseqs;j++) free(aln[j]); free(aln); free(weight); free(wtcount); for (j=0;j<seqlen;j++) free(pa[j]); free((void *)pa - sizeof(int)); for (j=0;j<ndim;j++) free(cmat[j]); free((void *)cmat - sizeof(int)); for(j=0;j<ndim;j++) free(tempmat[j]); free((void *)tempmat - sizeof(int)); for(j=0;j<ndim;j++) free(rho[j]); free((void *)rho - sizeof(int)); for(j=0;j<ndim;j++) free(ww[j]); free((void *)ww - sizeof(int)); return Py_BuildValue("Oi", Py_None, 1); } lastfnzero=0.0; /* Guess at a reasonable starting rho value if undefined */ if (rhodefault < 0.0) trialrho = MAX(0.001, 1.0 / wtsum); else trialrho = rhodefault; rfact = 0.0; if (blockfn[0]=='\0') blockfn=NULL; double besttd = BIG, bestrho=trialrho; for (;;) { double targdiff; if (trialrho <= 0.0 || trialrho >= 1.0) { /* Give up search - recalculate with best rho found so far and exit */ trialrho = bestrho; targfnzero = 0.0; } for (i=0; i<ndim; i++) for (j=0; j<ndim; j++) rho[i][j] = trialrho; for (i=0; i<seqlen; i++) for (j=0; j<seqlen; j++) for (a=0; a<21; a++) for (b=0; b<21; b++) if ((a != b && i == j) || 
pa[i][20] > maxgapf || pa[j][20] > maxgapf)
                            rho[i*21+a][j*21+b] = BIG;

        /* Mask out regions if block-out list provided */
        if (blockfn != NULL)
        {
            ifp = fopen(blockfn, "r");

            for (;;)
            {
                if (fscanf(ifp, "%d %d %lf", &i, &j, &score) != 3)
                    break;

                for (a = 0; a < 21; a++)
                    for (b = 0; b < 21; b++)
                    {
                        rho[(i-1)*21+a][(j-1)*21+b] = score;
                        rho[(j-1)*21+b][(i-1)*21+a] = score;
                    }
            }

            fclose(ifp);
        }

        glassofast(ndim, cmat, rho, thresh, maxit, approxflg, initflg, wwi, ww);

        /* Don't attempt iteration if too few sequences */
        if (targfnzero <= 0.0 || wtsum < seqlen)
            break;

        for (npair = nnzero = i = 0; i < ndim; i++)
            for (j = i + 1; j < ndim; j++, npair++)
                if (wwi[i][j] != 0.0)
                    nnzero++;

        fnzero = (double) nnzero / npair;

        /* Stop iterating if we have achieved the target sparsity level */
        targdiff = fabs(fnzero - targfnzero) / targfnzero;
        if (targdiff < 0.01)
            break;

        if (targdiff < besttd)
        {
            besttd = targdiff;
            bestrho = trialrho;
        }

        if (fnzero == 0.0)
        {
            /* As we have guessed far too high, halve rho and try again */
            trialrho *= 0.5;
            continue;
        }

        if (lastfnzero > 0.0 && fnzero != lastfnzero)
            rfact = pow(rfact, log(targfnzero / fnzero) / log(fnzero / lastfnzero));

        lastfnzero = fnzero;

        /* Make a small trial step in the appropriate direction */
        if (rfact == 0.0)
            rfact = (fnzero < targfnzero) ? 0.9 : 1.1;

        trialrho *= rfact;
    }

    /* Calculate background corrected scores using average product correction */
    pcmat = (double **)allocmat(seqlen, seqlen, sizeof(double));
    if (pcmat == NULL) {
        for (j = 0; j < nseqs; j++)
            free(aln[j]);
        free(aln);
        free(weight);
        free(wtcount);
        for (j = 0; j < seqlen; j++)
            free(pa[j]);
        free((void *)pa - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(cmat[j]);
        free((void *)cmat - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(tempmat[j]);
        free((void *)tempmat - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(rho[j]);
        free((void *)rho - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(ww[j]);
        free((void *)ww - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(wwi[j]);
        free((void *)wwi - sizeof(int));
        return Py_BuildValue("Oi", Py_None, 1);
    }
    pcsum = (double *)allocvec(seqlen, sizeof(double));
    if (pcsum == NULL) {
        for (j = 0; j < nseqs; j++)
            free(aln[j]);
        free(aln);
        free(weight);
        free(wtcount);
        for (j = 0; j < seqlen; j++)
            free(pa[j]);
        free((void *)pa - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(cmat[j]);
        free((void *)cmat - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(tempmat[j]);
        free((void *)tempmat - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(rho[j]);
        free((void *)rho - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(ww[j]);
        free((void *)ww - sizeof(int));
        for (j = 0; j < ndim; j++)
            free(wwi[j]);
        free((void *)wwi - sizeof(int));
        for (j = 0; j < seqlen; j++)
            free(pcmat[j]);
        free((void *)pcmat - sizeof(int));
        return Py_BuildValue("Oi", Py_None, 1);
    }

    pcmean = 0.0;

    for (i = 0; i < seqlen; i++)
        for (j = i + 1; j < seqlen; j++)
        {
            for (pc = a = 0; a < 20; a++)
                for (b = 0; b < 20; b++)
                    pc += fabs(wwi[i*21+a][j*21+b]);

            pcmat[i][j] = pcmat[j][i] = pc;
            pcsum[i] += pc;
            pcsum[j] += pc;
            pcmean += pc;
        }

    pcmean /= seqlen * (seqlen - 1) * 0.5;

    /* Build final list of predicted contacts */
    for (scsum = scsumsq = ncon = i = 0; i < seqlen; i++)
    {
        for (j = i; j < seqlen; j++)
            if (pcmat[i][j] > 0.0)
            {
                /* Calculate APC score */
                if (apcflg)
                    pcmat[i][j] = pcmat[j][i] =
                        pcmat[i][j] - pcsum[i] * pcsum[j] / SQR(seqlen - 1.0) / pcmean;
                if (j >= i + minseqsep)
                {
                    scsum += pcmat[i][j];
                    scsumsq += SQR(pcmat[i][j]);
                    ncon++;
                }
            }
            else
            {
                pcmat[i][j] = pcmat[j][i] = 0.;
            }
    }

    mean = scsum / ncon;
    sd = 1.25 * sqrt(scsumsq / ncon - SQR(mean));  /* Corrected for extreme-value bias */

    if (!rawscflg)
        for (i = 0; i < seqlen; i++)
            for (j = i + minseqsep; j < seqlen; j++)
            {
                zscore = (pcmat[i][j] - mean) / sd;
                ppv = 0.904 / (1.0 + 16.61 *
exp(-0.8105 * zscore)); mut[i*seqlen+j]=mut[j*seqlen+i]=ppv; } else for (i=0; i<seqlen; i++) for (j=i+minseqsep; j<seqlen; j++) mut[i*seqlen+j]=mut[j*seqlen+i]=pcmat[i][j]; for (j=0;j<nseqs;j++) free(aln[j]); free(aln); free(weight); free(wtcount); for (j=0;j<seqlen;j++) free(pa[j]); free((void *)pa - sizeof(int)); for (j=0;j<ndim;j++) free(cmat[j]); free((void *)cmat - sizeof(int)); for(j=0;j<ndim;j++) free(tempmat[j]); free((void *)tempmat - sizeof(int)); for(j=0;j<ndim;j++) free(rho[j]); free((void *)rho - sizeof(int)); for(j=0;j<ndim;j++) free(ww[j]); free((void *)ww - sizeof(int)); for(j=0;j<ndim;j++) free(wwi[j]); free((void *)wwi - sizeof(int)); for(j=0;j<seqlen;j++) free(pcmat[j]); free((void *)pcmat - sizeof(int)); free(pcsum); return Py_BuildValue("O", psicov); } static PyMethodDef Ccorrelation_p_methods[] = { {"msapsicov", (PyCFunction)msapsicov, METH_VARARGS | METH_KEYWORDS, "Return PSICOV matrix calculated for given character \n" "array that contains an MSA."}, {NULL, NULL, 0, NULL} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef Ccorrelation_pmodule = { PyModuleDef_HEAD_INIT, "Ccorrelation_p", "MSA correlation tools with parallel.", -1, Ccorrelation_p_methods, }; PyMODINIT_FUNC PyInit_Ccorrelation_p(void) { import_array(); return PyModule_Create(&Ccorrelation_pmodule); } #else PyMODINIT_FUNC initCcorrelation_p(void) { Py_InitModule3("Ccorrelation_p", Ccorrelation_p_methods, "MSA correlation tools with parallel."); import_array(); } #endif
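/*
 * A minimal sketch of the average product correction (APC) applied above:
 * the corrected score is s(i,j) - rowmean(i) * rowmean(j) / grandmean,
 * which is exactly pcmat[i][j] - pcsum[i]*pcsum[j]/SQR(seqlen-1.0)/pcmean
 * in the code. Function and parameter names are illustrative only.
 */
static double apc_corrected_score(double sij, double rowsum_i, double rowsum_j,
                                  double grand_mean, int seqlen)
{
    double rowmean_i = rowsum_i / (seqlen - 1.0);
    double rowmean_j = rowsum_j / (seqlen - 1.0);
    return sij - rowmean_i * rowmean_j / grand_mean;
}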
GB_add_phase0.c
//------------------------------------------------------------------------------
// GB_add_phase0: find vectors of C to compute for C=A+B or C<M>=A+B
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// The eWise add of two matrices, C=A+B, C<M>=A+B, or C<!M>=A+B starts with
// this phase, which determines which vectors of C need to be computed.
// This phase is also used for GB_masker.

// On input, A and B are the two matrices being added, and M is the optional
// mask matrix (not complemented). The complemented mask is handled in
// GB_mask, not here.

// The A matrix can be sparse, hypersparse, slice, or hyperslice. The B
// matrix can only be sparse or hypersparse. See GB_wait, which can pass in
// A as any of the four formats. In this case, no mask is present.

// On output, an integer (Cnvec), a boolean (Ch_is_Mh), and up to 3 arrays
// are returned, either NULL or of size Cnvec. Let n = A->vdim be the vector
// dimension of A, B, M and C.

//      Ch: the list of vectors to compute. If not NULL, Ch [k] = j is the
//      kth vector in C to compute, which will become the hyperlist C->h of
//      C. Note that some of these vectors may turn out to be empty, because
//      of the mask, or because the vector j appeared in A or B, but is
//      empty. It is pruned at the end of GB_add_phase2. If Ch is NULL then
//      it is an implicit list of size n, and Ch [k] == k for all k = 0:n-1.
//      In this case, C will be a standard matrix, not hypersparse. Thus,
//      the kth vector is j = (Ch == NULL) ? k : Ch [k].

//      Ch is freed by GB_add if phase1 fails. phase2 either frees it or
//      transplants it into C.

//      Ch_is_Mh: true if the mask M is present, hypersparse, and not
//      complemented, false otherwise. In this case Ch is a deep copy of Mh.
//      Only GB_add uses this option; it is not used by GB_masker (Ch_is_Mh
//      is always false for GB_masker). This is determined by passing in
//      p_Ch_is_Mh as a NULL or non-NULL pointer.

//      C_to_A: if A is hypersparse, then C_to_A [k] = kA if the kth vector,
//      j = (Ch == NULL) ? k : Ch [k] appears in A, as j = Ah [kA]. If j
//      does not appear in A, then C_to_A [k] = -1. If A is not hypersparse,
//      then C_to_A is returned as NULL.

//      C_to_B: if B is hypersparse, then C_to_B [k] = kB if the kth vector,
//      j = (Ch == NULL) ? k : Ch [k] appears in B, as j = Bh [kB]. If j
//      does not appear in B, then C_to_B [k] = -1. If B is not hypersparse,
//      then C_to_B is returned as NULL.

//      C_to_M: if M is hypersparse, and Ch_is_Mh is false, then C_to_M [k]
//      = kM if the kth vector, j = (Ch == NULL) ? k : Ch [k] appears in M,
//      as j = Mh [kM]. If j does not appear in M, then C_to_M [k] = -1. If
//      M is not hypersparse, then C_to_M is returned as NULL.
#include "GB_add.h" //------------------------------------------------------------------------------ // GB_allocate_result //------------------------------------------------------------------------------ static inline bool GB_allocate_result ( int64_t Cnvec, int64_t *restrict *Ch_handle, int64_t *restrict *C_to_M_handle, int64_t *restrict *C_to_A_handle, int64_t *restrict *C_to_B_handle ) { bool ok = true ; if (Ch_handle != NULL) { GB_MALLOC_MEMORY (*Ch_handle, Cnvec, sizeof (int64_t)) ; ok = (*Ch_handle != NULL) ; } if (C_to_M_handle != NULL) { GB_MALLOC_MEMORY (*C_to_M_handle, Cnvec, sizeof (int64_t)) ; ok = ok && (*C_to_M_handle != NULL) ; } if (C_to_A_handle != NULL) { GB_MALLOC_MEMORY (*C_to_A_handle, Cnvec, sizeof (int64_t)) ; ok = ok && (*C_to_A_handle != NULL) ; } if (C_to_B_handle != NULL) { GB_MALLOC_MEMORY (*C_to_B_handle, Cnvec, sizeof (int64_t)) ; ok = ok && (*C_to_B_handle != NULL) ; } if (!ok) { // out of memory if (Ch_handle != NULL) { GB_FREE_MEMORY (*Ch_handle, Cnvec, sizeof (int64_t)) ; } if (C_to_M_handle != NULL) { GB_FREE_MEMORY (*C_to_M_handle, Cnvec, sizeof (int64_t)) ; } if (C_to_A_handle != NULL) { GB_FREE_MEMORY (*C_to_A_handle, Cnvec, sizeof (int64_t)) ; } if (C_to_B_handle != NULL) { GB_FREE_MEMORY (*C_to_B_handle, Cnvec, sizeof (int64_t)) ; } } return (ok) ; } //------------------------------------------------------------------------------ // GB_add_phase0: find the vectors of C for C<M>=A+B //------------------------------------------------------------------------------ GrB_Info GB_add_phase0 // find vectors in C for C=A+B or C<M>=A+B ( int64_t *p_Cnvec, // # of vectors to compute in C int64_t *restrict *Ch_handle, // Ch: size Cnvec, or NULL int64_t *restrict *C_to_M_handle, // C_to_M: size Cnvec, or NULL int64_t *restrict *C_to_A_handle, // C_to_A: size Cnvec, or NULL int64_t *restrict *C_to_B_handle, // C_to_B: of size Cnvec, or NULL bool *p_Ch_is_Mh, // if true, then Ch == Mh const GrB_Matrix M, // optional mask, may be NULL; not complemented const GrB_Matrix A, // standard, hypersparse, slice, or hyperslice const GrB_Matrix B, // standard or hypersparse; never a slice GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (p_Cnvec != NULL) ; ASSERT (Ch_handle != NULL) ; ASSERT (C_to_A_handle != NULL) ; ASSERT (C_to_B_handle != NULL) ; ASSERT_OK (GB_check (A, "A for add phase0", GB0)) ; ASSERT_OK (GB_check (B, "B for add phase0", GB0)) ; ASSERT_OK_OR_NULL (GB_check (M, "M for add phase0", GB0)) ; ASSERT (A->vdim == B->vdim) ; ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ; //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- int64_t *restrict Ch = NULL ; int64_t *restrict C_to_M = NULL ; int64_t *restrict C_to_A = NULL ; int64_t *restrict C_to_B = NULL ; (*Ch_handle) = NULL ; (*C_to_A_handle) = NULL ; (*C_to_B_handle) = NULL ; if (C_to_M_handle != NULL) { (*C_to_M_handle) = NULL ; } //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = 1 ; // nthreads depends on Cnvec, computed below //-------------------------------------------------------------------------- // get content of M, A, 
and B //-------------------------------------------------------------------------- int64_t Cnvec ; int64_t n = A->vdim ; int64_t Anvec = A->nvec ; bool A_is_hyper = A->is_hyper ; bool A_is_slice = A->is_slice ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = (A_is_hyper) ? A->h : NULL ; const int64_t A_hfirst = A->hfirst ; #define GB_Ah(k) (A_is_hyper ? Ah [k] : (A_hfirst + (k))) int64_t Bnvec = B->nvec ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; bool B_is_hyper = B->is_hyper ; ASSERT (!B->is_slice) ; int64_t Mnvec = 0 ; const int64_t *restrict Mp = NULL ; const int64_t *restrict Mh = NULL ; bool M_is_hyper = false ; if (M != NULL) { Mnvec = M->nvec ; Mp = M->p ; Mh = M->h ; M_is_hyper = M->is_hyper ; ASSERT (!M->is_slice) ; } // For GB_add, if M is present, hypersparse, and not complemented, then C // will be hypersparse, and it will have set of vectors as M (Ch == Mh). // For GB_masker, Ch is never equal to Mh. bool Ch_is_Mh = (p_Ch_is_Mh != NULL) && (M != NULL && M_is_hyper) ; //-------------------------------------------------------------------------- // find the set union of the non-empty vectors of A and B //-------------------------------------------------------------------------- if (Ch_is_Mh) { //---------------------------------------------------------------------- // C is hypersparse, with the same vectors as the hypersparse M //---------------------------------------------------------------------- // This step is done for GB_add only, not GB_masker. // GB_wait is the only place where A may be a slice, and it does not // use a mask. So this phase can ignore the case where A is a slice. Cnvec = Mnvec ; nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ; ASSERT (!A_is_slice) ; if (!GB_allocate_result (Cnvec, &Ch, NULL, (A_is_hyper) ? (&C_to_A) : NULL, (B_is_hyper) ? (&C_to_B) : NULL)) { // out of memory return (GB_OUT_OF_MEMORY) ; } // copy Mh into Ch. Ch is Mh so C_to_M is not needed. GB_memcpy (Ch, Mh, Mnvec * sizeof (int64_t), nthreads) ; // construct the mapping from C to A and B, if they are hypersparse if (A_is_hyper || B_is_hyper) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < Cnvec ; k++) { int64_t j = Ch [k] ; if (A_is_hyper) { // C_to_A [k] = kA if Ah [kA] == j and A(:,j) is non-empty int64_t kA = 0, pA, pA_end ; GB_lookup (true, Ah, Ap, &kA, Anvec-1, j, &pA, &pA_end) ; C_to_A [k] = (pA < pA_end) ? kA : -1 ; } if (B_is_hyper) { // C_to_B [k] = kB if Bh [kB] == j and B(:,j) is non-empty int64_t kB = 0, pB, pB_end ; GB_lookup (true, Bh, Bp, &kB, Bnvec-1, j, &pB, &pB_end) ; C_to_B [k] = (pB < pB_end) ? kB : -1 ; } } } } else if ((A_is_hyper || A_is_slice) && B_is_hyper) { //---------------------------------------------------------------------- // A is hypersparse or a hyperslice, and B is hypersparse //---------------------------------------------------------------------- // Ch is the set union of Ah and Bh. This is handled with a parallel // merge, since Ah and Bh are both sorted lists. //---------------------------------------------------------------------- // phase 0: create the tasks //---------------------------------------------------------------------- double work = GB_IMIN (Anvec + Bnvec, n) ; nthreads = GB_nthreads (work, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ; ntasks = GB_IMIN (ntasks, work) ; int64_t kA_start [ntasks+1] ; int64_t kB_start [ntasks+1] ; int64_t kC_start [ntasks+1] ; kA_start [0] = (Anvec == 0) ? 
-1 : 0 ; kB_start [0] = (Bnvec == 0) ? -1 : 0 ; kA_start [ntasks] = (Anvec == 0) ? -1 : Anvec ; kB_start [ntasks] = (Bnvec == 0) ? -1 : Bnvec ; for (int taskid = 1 ; taskid < ntasks ; taskid++) { // create tasks: A and B are both hyper double target_work = ((ntasks-taskid) * work) / ntasks ; GB_slice_vector (NULL, NULL, &(kA_start [taskid]), &(kB_start [taskid]), 0, 0, NULL, // Mi not present 0, Anvec, Ah, A_hfirst, // Ah, explicit or implicit list 0, Bnvec, Bh, // Bh, explicit list n, // Ah and Bh have dimension n target_work) ; } //---------------------------------------------------------------------- // phase 1: count the entries in the result of each task //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule (dynamic,1) for (int taskid = 0 ; taskid < ntasks ; taskid++) { // merge Ah and Bh into Ch int64_t kA = kA_start [taskid] ; int64_t kB = kB_start [taskid] ; int64_t kA_end = kA_start [taskid+1] ; int64_t kB_end = kB_start [taskid+1] ; int64_t kC = 0 ; for ( ; kA < kA_end && kB < kB_end ; kC++) { int64_t jA = GB_Ah (kA) ; int64_t jB = Bh [kB] ; if (jA < jB) { // jA appears in A but not B kA++ ; } else if (jB < jA) { // jB appears in B but not A kB++ ; } else { // j = jA = jB appears in both A and B kA++ ; kB++ ; } } kC_start [taskid] = kC + (kA_end - kA) + (kB_end - kB) ; } //---------------------------------------------------------------------- // phase 1b: cumulative sum of entries for each task //---------------------------------------------------------------------- GB_cumsum (kC_start, ntasks, NULL, 1) ; Cnvec = kC_start [ntasks] ; //---------------------------------------------------------------------- // allocate the result //---------------------------------------------------------------------- // C will be hypersparse, so Ch is allocated. The mask M is ignored // for computing Ch. Ch is the set union of Ah and Bh. if (!GB_allocate_result (Cnvec, &Ch, (M_is_hyper) ? 
(&C_to_M) : NULL, &C_to_A, &C_to_B)) { // out of memory return (GB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // phase 2: compute the result //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule (dynamic,1) for (int taskid = 0 ; taskid < ntasks ; taskid++) { // merge Ah and Bh into Ch int64_t kA = kA_start [taskid] ; int64_t kB = kB_start [taskid] ; int64_t kC = kC_start [taskid] ; int64_t kA_end = kA_start [taskid+1] ; int64_t kB_end = kB_start [taskid+1] ; // merge Ah and Bh into Ch for ( ; kA < kA_end && kB < kB_end ; kC++) { int64_t jA = GB_Ah (kA) ; int64_t jB = Bh [kB] ; if (jA < jB) { // append jA to Ch Ch [kC] = jA ; C_to_A [kC] = kA++ ; C_to_B [kC] = -1 ; // jA does not appear in B } else if (jB < jA) { // append jB to Ch Ch [kC] = jB ; C_to_A [kC] = -1 ; // jB does not appear in A C_to_B [kC] = kB++ ; } else { // j appears in both A and B; append it to Ch Ch [kC] = jA ; C_to_A [kC] = kA++ ; C_to_B [kC] = kB++ ; } } if (kA < kA_end) { // B is exhausted but A is not for ( ; kA < kA_end ; kA++, kC++) { // append jA to Ch int64_t jA = GB_Ah (kA) ; Ch [kC] = jA ; C_to_A [kC] = kA ; C_to_B [kC] = -1 ; } } else if (kB < kB_end) { // A is exhausted but B is not for ( ; kB < kB_end ; kB++, kC++) { // append jB to Ch int64_t jB = Bh [kB] ; Ch [kC] = jB ; C_to_A [kC] = -1 ; C_to_B [kC] = kB ; } } ASSERT (kC == kC_start [taskid+1]) ; } //---------------------------------------------------------------------- // check result via a sequential merge //---------------------------------------------------------------------- #ifdef GB_DEBUG // merge Ah and Bh into Ch int64_t kA = 0 ; int64_t kB = 0 ; int64_t kC = 0 ; for ( ; kA < Anvec && kB < Bnvec ; kC++) { int64_t jA = GB_Ah (kA) ; int64_t jB = Bh [kB] ; if (jA < jB) { // append jA to Ch ASSERT (Ch [kC] == jA) ; ASSERT (C_to_A [kC] == kA) ; kA++ ; ASSERT (C_to_B [kC] == -1) ; // jA does not appear in B } else if (jB < jA) { // append jB to Ch ASSERT (Ch [kC] == jB) ; ASSERT (C_to_A [kC] == -1) ; // jB does not appear in A ASSERT (C_to_B [kC] == kB) ; kB++ ; } else { // j appears in both A and B; append it to Ch ASSERT (Ch [kC] == jA) ; ASSERT (C_to_A [kC] == kA) ; kA++ ; ASSERT (C_to_B [kC] == kB) ; kB++ ; } } if (kA < Anvec) { // B is exhausted but A is not for ( ; kA < Anvec ; kA++, kC++) { // append jA to Ch int64_t jA = GB_Ah (kA) ; ASSERT (Ch [kC] == jA) ; ASSERT (C_to_A [kC] == kA) ; ASSERT (C_to_B [kC] == -1) ; } } else if (kB < Bnvec) { // A is exhausted but B is not for ( ; kB < Bnvec ; kB++, kC++) { // append jB to Ch int64_t jB = Bh [kB] ; ASSERT (Ch [kC] == jB) ; ASSERT (C_to_A [kC] == -1) ; ASSERT (C_to_B [kC] == kB) ; } } ASSERT (kC == Cnvec) ; #endif } else if ((A_is_hyper || A_is_slice) && !B_is_hyper) { //---------------------------------------------------------------------- // A is hypersparse, B is standard //---------------------------------------------------------------------- // C will be standard. Construct the C_to_A mapping. Cnvec = n ; nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ; if (!GB_allocate_result (Cnvec, NULL, (M_is_hyper) ? 
(&C_to_M) : NULL, &C_to_A, NULL)) { // out of memory return (GB_OUT_OF_MEMORY) ; } #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t j = 0 ; j < n ; j++) { C_to_A [j] = -1 ; } // scatter Ah into C_to_A #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t kA = 0 ; kA < Anvec ; kA++) { int64_t jA = GB_Ah (kA) ; C_to_A [jA] = kA ; } } else if (!(A_is_hyper || A_is_slice) && B_is_hyper) { //---------------------------------------------------------------------- // A is standard, B is hypersparse //---------------------------------------------------------------------- // C will be standard. Construct the C_to_B mapping. Cnvec = n ; nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ; if (!GB_allocate_result (Cnvec, NULL, (M_is_hyper) ? (&C_to_M) : NULL, NULL, &C_to_B)) { // out of memory return (GB_OUT_OF_MEMORY) ; } #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t j = 0 ; j < n ; j++) { C_to_B [j] = -1 ; } // scatter Bh into C_to_B #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t kB = 0 ; kB < Bnvec ; kB++) { int64_t jB = Bh [kB] ; C_to_B [jB] = kB ; } } else { //---------------------------------------------------------------------- // A and B are both standard //---------------------------------------------------------------------- // C will be standard Cnvec = n ; nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ; if (!GB_allocate_result (Cnvec, NULL, (M_is_hyper) ? (&C_to_M) : NULL, NULL, NULL)) { // out of memory return (GB_OUT_OF_MEMORY) ; } } //-------------------------------------------------------------------------- // construct C_to_M if needed //-------------------------------------------------------------------------- if (C_to_M != NULL) { if (Ch != NULL) { // C is hypersparse #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < Cnvec ; k++) { int64_t j = Ch [k] ; // C_to_M [k] = kM if Mh [kM] == j and M(:,j) is non-empty int64_t kM = 0, pM, pM_end ; GB_lookup (true, Mh, Mp, &kM, Mnvec-1, j, &pM, &pM_end) ; C_to_M [k] = (pM < pM_end) ? kM : -1 ; } } else { // this case can occur only if M is present, complemented, and // hypersparse, and C is standard. #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t j = 0 ; j < n ; j++) { C_to_M [j] = -1 ; } // scatter Mh into C_to_M #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t kM = 0 ; kM < Mnvec ; kM++) { int64_t jM = Mh [kM] ; C_to_M [jM] = kM ; } } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- (*p_Cnvec ) = Cnvec ; if (p_Ch_is_Mh != NULL) { // return Ch_is_Mh to GB_add. For GB_masker, Ch is never Mh. 
(*p_Ch_is_Mh) = Ch_is_Mh ; } (*Ch_handle ) = Ch ; (*C_to_A_handle) = C_to_A ; (*C_to_B_handle) = C_to_B ; if (C_to_M_handle != NULL) { (*C_to_M_handle) = C_to_M ; } //-------------------------------------------------------------------------- // The code below describes what the output contains: //-------------------------------------------------------------------------- #ifdef GB_DEBUG ASSERT (A != NULL) ; // A and B are always present ASSERT (B != NULL) ; int64_t jlast = -1 ; for (int64_t k = 0 ; k < Cnvec ; k++) { // C(:,j) is in the list, as the kth vector int64_t j ; if (Ch == NULL) { // C will be constructed as standard sparse j = k ; } else { // C will be constructed as hypersparse j = Ch [k] ; } // vectors j in Ch are sorted, and in the range 0:n-1 ASSERT (j >= 0 && j < n) ; ASSERT (j > jlast) ; jlast = j ; // see if A (:,j) exists if (C_to_A != NULL) { // A is hypersparse, or a slice ASSERT (A->is_hyper || A->is_slice) ; int64_t kA = C_to_A [k] ; ASSERT (kA >= -1 && kA < A->nvec) ; if (kA >= 0) { int64_t jA = GB_Ah (kA) ; ASSERT (j == jA) ; } } else { // A is in standard sparse form // C_to_A exists only if A is hypersparse ASSERT (!(A->is_hyper || A->is_slice)) ; } // see if B (:,j) exists if (C_to_B != NULL) { // B is hypersparse ASSERT (B->is_hyper) ; int64_t kB = C_to_B [k] ; ASSERT (kB >= -1 && kB < B->nvec) ; if (kB >= 0) { int64_t jB = B->h [kB] ; ASSERT (j == jB) ; } } else { // B is in standard sparse form // C_to_B exists only if B is hypersparse ASSERT (!B->is_hyper) ; } // see if M (:,j) exists if (Ch_is_Mh) { // Ch is the same as Mh ASSERT (M != NULL) ; ASSERT (M->is_hyper) ; ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ; ASSERT (C_to_M == NULL) ; } else if (C_to_M != NULL) { // M is present and hypersparse ASSERT (M != NULL) ; ASSERT (M->is_hyper) ; int64_t kM = C_to_M [k] ; ASSERT (kM >= -1 && kM < M->nvec) ; if (kM >= 0) { int64_t jM = M->h [kM] ; ASSERT (j == jM) ; } } else { // M is not present, or in standard form ASSERT (M == NULL || !(M->is_hyper)) ; } } #endif return (GrB_SUCCESS) ; }
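/*
 * A minimal standalone sketch of the sorted-list set union that the
 * hyper/hyper case above computes: Ch is the merge of the hyperlists Ah and
 * Bh, with back-pointers into each input (-1 when a vector is absent). This
 * is the sequential form of the parallel two-phase merge; the names below
 * are illustrative, not the library's internal API.
 */
#include <stdint.h>

static int64_t hyperlist_set_union (const int64_t *Ah, int64_t anvec,
                                    const int64_t *Bh, int64_t bnvec,
                                    int64_t *Ch, int64_t *C_to_A, int64_t *C_to_B)
{
    int64_t kA = 0, kB = 0, kC = 0 ;
    while (kA < anvec && kB < bnvec)
    {
        if (Ah [kA] < Bh [kB])
        { Ch [kC] = Ah [kA] ; C_to_A [kC] = kA++ ; C_to_B [kC] = -1 ; }
        else if (Bh [kB] < Ah [kA])
        { Ch [kC] = Bh [kB] ; C_to_A [kC] = -1 ; C_to_B [kC] = kB++ ; }
        else /* the vector appears in both A and B */
        { Ch [kC] = Ah [kA] ; C_to_A [kC] = kA++ ; C_to_B [kC] = kB++ ; }
        kC++ ;
    }
    while (kA < anvec) { Ch [kC] = Ah [kA] ; C_to_A [kC] = kA++ ; C_to_B [kC] = -1 ; kC++ ; }
    while (kB < bnvec) { Ch [kC] = Bh [kB] ; C_to_A [kC] = -1 ; C_to_B [kC] = kB++ ; kC++ ; }
    return (kC) ;   /* this is Cnvec */
}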
GB_binop__div_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_int8) // A.*B function (eWiseMult): GB (_AemultB_08__div_int8) // A.*B function (eWiseMult): GB (_AemultB_02__div_int8) // A.*B function (eWiseMult): GB (_AemultB_04__div_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int8) // A*D function (colscale): GB (_AxD__div_int8) // D*A function (rowscale): GB (_DxB__div_int8) // C+=B function (dense accum): GB (_Cdense_accumB__div_int8) // C+=b function (dense accum): GB (_Cdense_accumb__div_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int8) // C=scalar+B GB (_bind1st__div_int8) // C=scalar+B' GB (_bind1st_tran__div_int8) // C=A+scalar GB (_bind2nd__div_int8) // C=A'+scalar GB (_bind2nd_tran__div_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (x, y, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_INT8 || GxB_NO_DIV_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__div_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) 
; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__div_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__div_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (x, bij, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (aij, y, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (x, aij, 8) ; \ } GrB_Info GB (_bind1st_tran__div_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, y, 8) ; \ } GrB_Info GB (_bind2nd_tran__div_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
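/* Illustrative sketch, not part of the generated file above: the div kernels here rely on GB_IDIV_SIGNED to keep signed integer division well defined. The real macro is defined in SuiteSparse's GB.h; the helper below only demonstrates the general guarded-division convention (saturate on division by zero, avoid the INT8_MIN / -1 trap) and may differ from the exact definition. */
#include <stdint.h>
static inline int8_t idiv_int8_sketch (int8_t x, int8_t y)
{
    if (y == -1)
    {
        /* x / -1 == -x ; negating avoids the INT8_MIN / -1 overflow trap
           (INT8_MIN wraps back to INT8_MIN in two's complement) */
        return ((int8_t) (-x)) ;
    }
    if (y == 0)
    {
        /* saturating convention for x/0: 0/0 -> 0, else +/- "integer infinity" */
        return ((x == 0) ? 0 : ((x < 0) ? INT8_MIN : INT8_MAX)) ;
    }
    return ((int8_t) (x / y)) ;
}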
utils.c
/** * \file utils.c * \brief Generic tool utilities * * \author J.R.Versteegh <j.r.versteegh@orca-st.com> * * \copyright (C) 2017 SURFnet. All rights reserved. * \license This software may be modified and distributed under the * terms of the BSD license. See the LICENSE file for details. */ #include <stdio.h> #include <stdarg.h> #include "types.h" #include "utils.h" void msg(log_level_t log_level, const char *message, ...) { va_list args; va_start(args, message); switch (log_level) { case (log_debug): #ifdef DEBUG #pragma omp critical vfprintf(stderr, message, args); #endif break; case (log_info): #pragma omp critical vfprintf(stderr, message, args); break; case (log_error): #pragma omp critical vfprintf(stderr, message, args); break; default: #pragma omp critical fprintf(stderr, "Unknown loglevel\n"); } va_end(args); }
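/* Hypothetical usage sketch, not part of utils.c: the log_debug/log_info/log_error
 * constants are assumed to come from types.h. Because each vfprintf above sits in
 * an unnamed omp critical section, concurrent threads cannot be inside the logging
 * call at the same time, so messages are emitted atomically. */
#ifdef UTILS_USAGE_EXAMPLE
#include <omp.h>
int main(void) {
  #pragma omp parallel
  msg(log_info, "hello from thread %d\n", omp_get_thread_num());
  return 0;
}
#endif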
par_csr_matop_device.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.hpp" #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_Int hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j; HYPRE_Int num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv; HYPRE_Int *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i; HYPRE_BigInt *d_send_j, *d_recv_j; HYPRE_Int *send_jstarts, *recv_jstarts; HYPRE_Complex *d_send_a = NULL, *d_recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */ /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */ /* HYPRE_Int first_row = hypre_ParCSRMatrixFirstRowIndex(A); */ HYPRE_Int first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs; HYPRE_Int my_id; void **vrequest; hypre_CSRMatrix *A_ext; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* number of sends (#procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); /* must be true if indices contains proper offd indices */ hypre_assert(indices_len == num_rows_recv); /* send_i/recv_i: * the arrays to send and recv: we first send and recv the row lengths */ d_send_i = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE); d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE); send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST); recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST); d_recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE); /* fill the send array with row lengths */ hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg), HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, d_send_i+1); /* send array send_i out: deviceTohost first 
and MPI (async) * note the shift in recv_i by one */ hypre_TMemcpy(send_i, d_send_i+1, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1); hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i); /* total number of nnz to send */ hypre_TMemcpy(&num_nnz_send, d_send_i+num_rows_send, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); /* prepare data to send out. overlap with the above communication */ d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE); if (want_data) { d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE); } if (d_col_map_offd_A == NULL) { d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A; } /* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */ hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1, first_col, d_col_map_offd_A, A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a, d_send_i, d_send_j, d_send_a); /* pointers to each proc in send_j */ send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); send_jstarts[0] = 0; for (i = 1; i <= num_sends; i++) { send_jstarts[i] = send_jstarts[i-1]; for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i-1); j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); j++ ) { send_jstarts[i] += send_i[j]; } } hypre_assert(send_jstarts[num_sends] == num_nnz_send); /* finish the above communication: send_i/recv_i */ hypre_ParCSRCommHandleDestroy(comm_handle); /* adjust recv_i to ptrs */ recv_i[0] = 0; for (i = 1; i <= num_rows_recv; i++) { recv_i[i] += recv_i[i-1]; } num_nnz_recv = recv_i[num_rows_recv]; /* allocate device memory for j and a */ d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE); if (want_data) { d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE); } recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); recv_jstarts[0] = 0; for (i = 1; i <= num_recvs; i++) { j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); recv_jstarts[i] = recv_i[j]; } /* ready to send and recv: create a communication package for data */ comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm (comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts; hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* init communication */ /* ja */ comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j, HYPRE_MEMORY_DEVICE, d_send_j, HYPRE_MEMORY_DEVICE, d_recv_j); if (want_data) { /* a */ comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j, HYPRE_MEMORY_DEVICE, d_send_a, HYPRE_MEMORY_DEVICE, d_recv_a); } else { comm_handle_a = NULL; } hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv+1, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); /* create A_ext: on device */ A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixI (A_ext) = d_recv_i; hypre_CSRMatrixBigJ(A_ext) = d_recv_j;
hypre_CSRMatrixData(A_ext) = d_recv_a; hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE; /* output */ vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) A_ext; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(recv_i, HYPRE_MEMORY_HOST); hypre_TFree(d_send_i, HYPRE_MEMORY_DEVICE); hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2]; HYPRE_BigInt *send_j = comm_handle_j ? (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL; HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL; hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_j, HYPRE_MEMORY_DEVICE); hypre_TFree(send_a, HYPRE_MEMORY_DEVICE); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } hypre_CSRMatrix* hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); hypre_CSRMatrix *B; HYPRE_Int B_nrows = local_num_rows; HYPRE_BigInt B_ncols = global_num_cols; HYPRE_Int *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *B_j; HYPRE_Complex *B_a; HYPRE_Int B_nnz; HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i+1); hypreDevice_IntegerInclusiveScan(B_nrows+1, B_i); /* total number of nnz */ hypre_TMemcpy(&B_nnz, B_i+B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); B_j = hypre_TAlloc(HYPRE_BigInt, B_nnz, HYPRE_MEMORY_DEVICE); B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); if (d_col_map_offd_A == NULL) { d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A; } hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1, first_col, d_col_map_offd_A, A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a, B_i,
B_j, B_a); /* output */ B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz); hypre_CSRMatrixI (B) = B_i; hypre_CSRMatrixBigJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; hypre_SyncCudaComputeStream(hypre_handle()); return B; } HYPRE_Int hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix *B_ext, hypre_ParCSRCommPkg *comm_pkg_A, HYPRE_Int want_data, void **request_ptr) { MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int num_elmts_send = send_map_starts[num_sends]; HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs]; HYPRE_Int *B_ext_i_d = hypre_CSRMatrixI(B_ext); HYPRE_BigInt *B_ext_j_d = hypre_CSRMatrixBigJ(B_ext); HYPRE_Complex *B_ext_a_d = hypre_CSRMatrixData(B_ext); HYPRE_Int B_ext_ncols = hypre_CSRMatrixNumCols(B_ext); HYPRE_Int B_ext_nrows = hypre_CSRMatrixNumRows(B_ext); HYPRE_Int B_ext_nnz = hypre_CSRMatrixNumNonzeros(B_ext); HYPRE_Int *B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST); HYPRE_Int *B_ext_i_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST); hypre_assert(num_elmts_recv == B_ext_nrows); /* output matrix */ hypre_CSRMatrix *B_int_d; HYPRE_Int B_int_nrows = num_elmts_send; HYPRE_Int B_int_ncols = B_ext_ncols; HYPRE_Int *B_int_i_h = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST); HYPRE_Int *B_int_i_d = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *B_int_j_d = NULL; HYPRE_Complex *B_int_a_d = NULL; HYPRE_Int B_int_nnz; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; hypre_ParCSRCommPkg *comm_pkg_j; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i; HYPRE_Int num_procs, my_id; void **vrequest; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * B_ext_rownnz contains the number of elements of row j * (to be determined through send_map_elmnts on the receiving end) *--------------------------------------------------------------------------*/ HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d); hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); /*-------------------------------------------------------------------------- * initialize communication: send/recv the row nnz * (note the use of comm_pkg_A, mode 12, as in transpose matvec *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1); jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); jdata_recv_vec_starts[0] = 0; B_ext_i_h[0] = 0; hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); for (i = 1; i <= B_ext_nrows; i++) { B_ext_i_h[i] += B_ext_i_h[i-1]; } 
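/* at this point B_ext_i_h holds the CSR row pointers of B_ext (prefix sums of the row-nnz counts), so the per-process offsets into the j/data arrays can be read off directly at the recv_vec_starts boundaries below */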
hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz); for (i = 1; i <= num_recvs; i++) { jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]]; } comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs; hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs; hypre_ParCSRCommHandleDestroy(comm_handle); /*-------------------------------------------------------------------------- * compute B_int: row nnz to row ptrs *--------------------------------------------------------------------------*/ B_int_i_h[0] = 0; for (i = 1; i <= B_int_nrows; i++) { B_int_i_h[i] += B_int_i_h[i-1]; } B_int_nnz = B_int_i_h[B_int_nrows]; B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE); if (want_data) { B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE); } for (i = 0; i <= num_sends; i++) { jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]]; } /* note the order of send/recv is reversed */ hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts; /* send/recv CSR rows */ if (want_data) { comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j, HYPRE_MEMORY_DEVICE, B_ext_a_d, HYPRE_MEMORY_DEVICE, B_int_a_d ); } else { comm_handle_a = NULL; } comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j, HYPRE_MEMORY_DEVICE, B_ext_j_d, HYPRE_MEMORY_DEVICE, B_int_j_d ); hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows+1, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); /* create CSR: on device */ B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz); hypre_CSRMatrixI(B_int_d) = B_int_i_d; hypre_CSRMatrixBigJ(B_int_d) = B_int_j_d; hypre_CSRMatrixData(B_int_d) = B_int_a_d; hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE; /* output */ vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) B_int_d; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE); hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_i_h, HYPRE_MEMORY_HOST); hypre_TFree(B_int_i_h, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ExchangeExternalRowsDeviceWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *B_int_d = (hypre_CSRMatrix *) request[2]; /* communication done */ hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(request, HYPRE_MEMORY_HOST); return B_int_d; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ HYPRE_Int hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data, void 
**request_ptr) { hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) == hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) ); /* hypre_assert( hypre_GetActualMemLocation( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B))) == HYPRE_MEMORY_DEVICE ); */ if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } hypre_ParcsrGetExternalRowsDeviceInit(B, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixCommPkg(A), want_data, request_ptr); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParCSRMatrixExtractBExtDeviceWait(void *request) { return hypre_ParcsrGetExternalRowsDeviceWait(request); } hypre_CSRMatrix* hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data ) { void *request; hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request); return hypre_ParCSRMatrixExtractBExtDeviceWait(request); } /* return B = [Adiag, Aoffd] */ #if 1 __global__ void hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int nrows, HYPRE_Int diag_ncol, HYPRE_Int *d_diag_i, HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a, HYPRE_Int *d_offd_i, HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a, HYPRE_Int *cols_offd_map, HYPRE_Int *d_ib, HYPRE_Int *d_jb, HYPRE_Complex *d_ab) { const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1,1>(); if (row >= nrows) { return; } /* lane id inside the warp */ const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>(); HYPRE_Int i, j, k, p, istart, iend, bstart; /* diag part */ if (lane_id < 2) { j = read_only_load(d_diag_i + row + lane_id); } if (lane_id == 0) { k = read_only_load(d_ib + row); } istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0); iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1); bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0); p = bstart - istart; for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE) { d_jb[p+i] = read_only_load(d_diag_j + i); d_ab[p+i] = read_only_load(d_diag_a + i); } /* offd part */ if (lane_id < 2) { j = read_only_load(d_offd_i + row + lane_id); } bstart += iend - istart; istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0); iend = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1); p = bstart - istart; for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE) { const HYPRE_Int t = read_only_load(d_offd_j + i); d_jb[p+i] = (cols_offd_map ? 
read_only_load(&cols_offd_map[t]) : t) + diag_ncol; d_ab[p+i] = read_only_load(d_offd_a + i); } } hypre_CSRMatrix* hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd), hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) ); hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B)); HYPRE_THRUST_CALL( exclusive_scan, hypre_CSRMatrixI(B), hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1, hypre_CSRMatrixI(B) ); const dim3 bDim = hypre_GetDefaultCUDABlockDimension(); const dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag), hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd), NULL, hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); return B; } #else hypre_CSRMatrix* hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd); hypre_CSRMatrix *B; HYPRE_Int B_nrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd); HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz; HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); // Adiag HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)), A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) ); hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE); // Aoffd HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)), A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz ); hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, A_offd_j, A_offd_j + A_offd_nnz, thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)), B_j + A_diag_nnz, thrust::plus<HYPRE_Int>() ); // B HYPRE_THRUST_CALL( stable_sort_by_key, B_ii, B_ii + B_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) ); HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii); hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE); B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz); hypre_CSRMatrixI(B) = B_i; hypre_CSRMatrixJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; 
hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; return B; } #endif /* return B = [Adiag, Aoffd; E] */ #if 1 HYPRE_Int hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A, hypre_CSRMatrix *E, hypre_CSRMatrix **B_ptr, HYPRE_Int *num_cols_offd_ptr, HYPRE_BigInt **cols_map_offd_ptr) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *E_diag, *E_offd, *B; HYPRE_Int *cols_offd_map, num_cols_offd; HYPRE_BigInt *cols_map_offd; hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A), hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A), &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd); B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E), hypre_ParCSRMatrixNumCols(A) + num_cols_offd, hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) + hypre_CSRMatrixNumNonzeros(E)); hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B)); HYPRE_THRUST_CALL( exclusive_scan, hypre_CSRMatrixI(B), hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(B) ); dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag), hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd), cols_offd_map, hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(E) + 1, HYPRE_Int, hypre_CSRMatrixNumRows(E), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E) + 1, thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd)), hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, thrust::plus<HYPRE_Int>() ); gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim); hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag)); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(E_diag), hypre_CSRMatrixNumCols(E_diag), hypre_CSRMatrixI(E_diag), hypre_CSRMatrixJ(E_diag), hypre_CSRMatrixData(E_diag), hypre_CSRMatrixI(E_offd), hypre_CSRMatrixJ(E_offd), hypre_CSRMatrixData(E_offd), NULL, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); hypre_CSRMatrixDestroy(E_diag); hypre_CSRMatrixDestroy(E_offd); *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #else HYPRE_Int hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A, hypre_CSRMatrix *E, hypre_CSRMatrix **B_ptr, HYPRE_Int *num_cols_offd_ptr, HYPRE_BigInt **cols_map_offd_ptr) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int A_ncols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = 
hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd); HYPRE_BigInt first_col_A = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt last_col_A = hypre_ParCSRMatrixLastColDiag(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); HYPRE_Int *E_i = hypre_CSRMatrixI(E); HYPRE_BigInt *E_bigj = hypre_CSRMatrixBigJ(E); HYPRE_Complex *E_a = hypre_CSRMatrixData(E); HYPRE_Int E_nrows = hypre_CSRMatrixNumRows(E); HYPRE_Int E_nnz = hypre_CSRMatrixNumNonzeros(E); HYPRE_Int E_diag_nnz, E_offd_nnz; hypre_CSRMatrix *B; HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz + E_nnz; HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); // E hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL, first_col_A, last_col_A, num_cols_offd_A, NULL, NULL, NULL, NULL, &E_diag_nnz, NULL, NULL, NULL, NULL, &E_offd_nnz, NULL, NULL, NULL, NULL); HYPRE_Int *cols_offd_map, num_cols_offd; HYPRE_BigInt *cols_map_offd; HYPRE_Int *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i); hypre_CSRMatrixSplitDevice_core(1, E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL, first_col_A, last_col_A, num_cols_offd_A, col_map_offd_A, &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag_nnz, B_ii + A_diag_nnz + A_offd_nnz, B_j + A_diag_nnz + A_offd_nnz, B_a + A_diag_nnz + A_offd_nnz, NULL, &E_offd_nnz, B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_a + A_diag_nnz + A_offd_nnz + E_diag_nnz, NULL); hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_ii + A_diag_nnz + A_offd_nnz, B_ii + B_nnz, thrust::make_constant_iterator(A_nrows), B_ii + A_diag_nnz + A_offd_nnz, thrust::plus<HYPRE_Int>() ); // Adiag HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)), A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) ); hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE); // Aoffd HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)), A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz ); hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( gather, A_offd_j, A_offd_j + A_offd_nnz, cols_offd_map, B_j + A_diag_nnz); hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz, B_j + A_diag_nnz + A_offd_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz, thrust::plus<HYPRE_Int>() ); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + B_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, thrust::plus<HYPRE_Int>() ); // B HYPRE_THRUST_CALL( stable_sort_by_key, B_ii, B_ii + B_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) ); HYPRE_Int *B_i = 
hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii); hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE); B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz); hypre_CSRMatrixI(B) = B_i; hypre_CSRMatrixJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #endif HYPRE_Int hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { HYPRE_Int nrows, local_row; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return(-1); } hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; nrows = row_end - row_start; if (row < row_start || row >= row_end) { return(-1); } local_row = row - row_start; /* if buffer is not allocated and some information is requested, allocate buffer with the max row_nnz */ if ( !hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) ) { HYPRE_Int max_row_nnz; HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz); hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>()); /* HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows); hypre_TMemcpy( &max_row_nnz, max_row_nnz_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE ); */ hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE); hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_TAlloc(HYPRE_BigInt, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); } else { HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row, size_d); hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TFree(size_d, HYPRE_MEMORY_DEVICE); } if (col_ind || values) { if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL) { hypre_ParCSRMatrixDeviceColMapOffd(mat) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE); hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_ParCSRMatrixColMapOffd(mat), HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST ); } hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL, hypre_ParCSRMatrixFirstColDiag(mat), hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixJ(Aa), hypre_CSRMatrixData(Aa), hypre_CSRMatrixI(Ba) + local_row, hypre_CSRMatrixJ(Ba), hypre_CSRMatrixData(Ba), NULL, hypre_ParCSRMatrixRowindices(mat), hypre_ParCSRMatrixRowvalues(mat) ); } if (col_ind) { *col_ind = hypre_ParCSRMatrixRowindices(mat); } if (values) { *values = hypre_ParCSRMatrixRowvalues(mat); } hypre_SyncCudaComputeStream(hypre_handle()); return hypre_error_flag; } /* Get element-wise tolerances based on row norms for 
ParCSRMatrix * NOTE: Keep the diagonal, i.e. elmt_tol = 0.0 for diagonals * Output vectors have size nnz: * elmt_tols_diag[j] = tol * (norm of row i) for j in [ A_diag_i[i] , A_diag_i[i+1] ) * elmt_tols_offd[j] = tol * (norm of row i) for j in [ A_offd_i[i] , A_offd_i[i+1] ) * type == -1, infinity norm, * 1, 1-norm * 2, 2-norm */ template<HYPRE_Int type> __global__ void hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols( HYPRE_Int nrows, HYPRE_Real tol, HYPRE_Int *A_diag_i, HYPRE_Int *A_diag_j, HYPRE_Complex *A_diag_a, HYPRE_Int *A_offd_i, HYPRE_Complex *A_offd_a, HYPRE_Real *elmt_tols_diag, HYPRE_Real *elmt_tols_offd) { HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1,1>(); if (row_i >= nrows) { return; } HYPRE_Int lane = hypre_cuda_get_lane_id<1>(); HYPRE_Int p_diag, p_offd, q_diag, q_offd; /* sum row norm over diag part */ if (lane < 2) { p_diag = read_only_load(A_diag_i + row_i + lane); } q_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 1); p_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 0); HYPRE_Real row_norm_i = 0.0; for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE) { HYPRE_Complex val = A_diag_a[j]; if (type == -1) { row_norm_i = hypre_max(row_norm_i, hypre_cabs(val)); } else if (type == 1) { row_norm_i += hypre_cabs(val); } else if (type == 2) { row_norm_i += val * val; } } /* sum row norm over offd part */ if (lane < 2) { p_offd = read_only_load(A_offd_i + row_i + lane); } q_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 1); p_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 0); for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE) { HYPRE_Complex val = A_offd_a[j]; if (type == -1) { row_norm_i = hypre_max(row_norm_i, hypre_cabs(val)); } else if (type == 1) { row_norm_i += hypre_cabs(val); } else if (type == 2) { row_norm_i += val * val; } } /* allreduce to get the row norm on all threads */ if (type == -1) { row_norm_i = warp_allreduce_max(row_norm_i); } else { row_norm_i = warp_allreduce_sum(row_norm_i); } if (type == 2) { row_norm_i = sqrt(row_norm_i); } /* set elmt_tols_diag */ for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE) { HYPRE_Int col = A_diag_j[j]; /* elmt_tol = 0.0 ensures diagonal will be kept */ if (col == row_i) { elmt_tols_diag[j] = 0.0; } else { elmt_tols_diag[j] = tol * row_norm_i; } } /* set elmt_tols_offd */ for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE) { elmt_tols_offd[j] = tol * row_norm_i; } } /* drop the entries that are not on the diagonal and smaller than: * type 0: tol * type 1: tol*(1-norm of row) * type 2: tol*(2-norm of row) * type -1: tol*(infinity norm of row) */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntriesDevice( hypre_ParCSRMatrix *A, HYPRE_Complex tol, HYPRE_Int type) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *h_col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); HYPRE_Real *elmt_tols_diag = NULL; HYPRE_Real *elmt_tols_offd = NULL; if (col_map_offd_A == NULL) { col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(col_map_offd_A, h_col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A; } /* get element-wise tolerances if needed */ if (type != 0) { elmt_tols_diag = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_diag),
HYPRE_MEMORY_DEVICE); elmt_tols_offd = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE); } dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim); if (type == -1) { HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<-1>, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd); } if (type == 1) { HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<1>, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd); } if (type == 2) { HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<2>, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd); } /* drop entries from diag and offd CSR matrices */ hypre_CSRMatrixDropSmallEntriesDevice(A_diag, tol, elmt_tols_diag); hypre_CSRMatrixDropSmallEntriesDevice(A_offd, tol, elmt_tols_offd); hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); /* squeeze out zero columns of A_offd */ HYPRE_Int *tmp_j, *tmp_end, num_cols_A_offd_new; tmp_j = hypre_TAlloc(HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp_j, hypre_CSRMatrixJ(A_offd), HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( sort, tmp_j, tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) ); tmp_end = HYPRE_THRUST_CALL( unique, tmp_j, tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) ); num_cols_A_offd_new = tmp_end - tmp_j; hypre_assert(num_cols_A_offd_new <= num_cols_A_offd); if (num_cols_A_offd_new < num_cols_A_offd) { hypre_CSRMatrixNumCols(A_offd) = num_cols_A_offd_new; HYPRE_Int *offd_mark = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *col_map_offd_A_new = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( scatter, thrust::counting_iterator<HYPRE_Int>(0), thrust::counting_iterator<HYPRE_Int>(num_cols_A_offd_new), tmp_j, offd_mark ); HYPRE_THRUST_CALL( gather, hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixJ(A_offd) + hypre_CSRMatrixNumNonzeros(A_offd), offd_mark, hypre_CSRMatrixJ(A_offd) ); HYPRE_THRUST_CALL( gather, tmp_j, tmp_j + num_cols_A_offd_new, col_map_offd_A, col_map_offd_A_new ); hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE); hypre_TFree(col_map_offd_A, HYPRE_MEMORY_DEVICE); hypre_TFree(h_col_map_offd_A, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A_new; hypre_ParCSRMatrixColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(A), col_map_offd_A_new, HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } if (type != 0) { hypre_TFree(elmt_tols_diag, HYPRE_MEMORY_DEVICE); hypre_TFree(elmt_tols_offd, HYPRE_MEMORY_DEVICE); } hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTransposeDevice 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixTransposeDevice( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *A_diagT; hypre_CSRMatrix *AT_offd; HYPRE_Int num_procs; HYPRE_Int num_cols_offd_AT = 0; HYPRE_BigInt *col_map_offd_AT = NULL; hypre_ParCSRMatrix *AT; hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs); if (num_procs > 1) { void *request; hypre_CSRMatrix *A_offdT, *Aext; HYPRE_Int *Aext_ii, *Aext_j, Aext_nnz; HYPRE_Complex *Aext_data; HYPRE_BigInt *tmp_bigj; hypre_CSRMatrixTranspose(A_offd, &A_offdT, data); hypre_CSRMatrixBigJ(A_offdT) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumNonzeros(A_offdT), HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, hypre_CSRMatrixJ(A_offdT), hypre_CSRMatrixJ(A_offdT) + hypre_CSRMatrixNumNonzeros(A_offdT), thrust::make_constant_iterator(hypre_ParCSRMatrixFirstRowIndex(A)), hypre_CSRMatrixBigJ(A_offdT), thrust::plus<HYPRE_BigInt>() ); if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } hypre_ExchangeExternalRowsDeviceInit(A_offdT, hypre_ParCSRMatrixCommPkg(A), data, &request); hypre_CSRMatrixTranspose(A_diag, &A_diagT, data); Aext = hypre_ExchangeExternalRowsDeviceWait(request); hypre_CSRMatrixDestroy(A_offdT); // Aext contains offd of AT Aext_nnz = hypre_CSRMatrixNumNonzeros(Aext); Aext_ii = hypreDevice_CsrRowPtrsToIndices(hypre_CSRMatrixNumRows(Aext), Aext_nnz, hypre_CSRMatrixI(Aext)); hypre_ParCSRCommPkgCopySendMapElmtsToDevice(hypre_ParCSRMatrixCommPkg(A)); HYPRE_THRUST_CALL( gather, Aext_ii, Aext_ii + Aext_nnz, hypre_ParCSRCommPkgDeviceSendMapElmts(hypre_ParCSRMatrixCommPkg(A)), Aext_ii ); tmp_bigj = hypre_TAlloc(HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp_bigj, hypre_CSRMatrixBigJ(Aext), HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( sort, tmp_bigj, tmp_bigj + Aext_nnz ); HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique, tmp_bigj, tmp_bigj + Aext_nnz ); num_cols_offd_AT = new_end - tmp_bigj; col_map_offd_AT = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(col_map_offd_AT, tmp_bigj, HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); hypre_TFree(tmp_bigj, HYPRE_MEMORY_DEVICE); Aext_j = hypre_TAlloc(HYPRE_Int, Aext_nnz, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( lower_bound, col_map_offd_AT, col_map_offd_AT + num_cols_offd_AT, hypre_CSRMatrixBigJ(Aext), hypre_CSRMatrixBigJ(Aext) + Aext_nnz, Aext_j ); Aext_data = hypre_CSRMatrixData(Aext); hypre_CSRMatrixData(Aext) = NULL; hypre_CSRMatrixDestroy(Aext); if (data) { hypreDevice_StableSortByTupleKey(Aext_nnz, Aext_ii, Aext_j, Aext_data, 0); } else { HYPRE_THRUST_CALL( stable_sort, thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)), thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)) + Aext_nnz ); } AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), num_cols_offd_AT, Aext_nnz); hypre_CSRMatrixJ(AT_offd) = Aext_j; hypre_CSRMatrixData(AT_offd) = Aext_data; hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE); hypreDevice_CsrRowIndicesToPtrs_v2(hypre_CSRMatrixNumRows(AT_offd), Aext_nnz, Aext_ii, hypre_CSRMatrixI(AT_offd)); hypre_TFree(Aext_ii, HYPRE_MEMORY_DEVICE); } else { hypre_CSRMatrixTransposeDevice(A_diag, &A_diagT, data); AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), 0, 0); 
hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE); } AT = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixColStarts(A), hypre_ParCSRMatrixRowStarts(A), num_cols_offd_AT, hypre_CSRMatrixNumNonzeros(A_diagT), hypre_CSRMatrixNumNonzeros(AT_offd)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(AT)); hypre_ParCSRMatrixDiag(AT) = A_diagT; hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(AT)); hypre_ParCSRMatrixOffd(AT) = AT_offd; if (num_cols_offd_AT) { hypre_ParCSRMatrixDeviceColMapOffd(AT) = col_map_offd_AT; hypre_ParCSRMatrixColMapOffd(AT) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(AT), col_map_offd_AT, HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } *AT_ptr = AT; return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixAddDevice( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **C_ptr ) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int num_cols_offd_C = 0; HYPRE_BigInt *d_col_map_offd_C = NULL; HYPRE_Int num_procs; hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs); hypre_CSRMatrix *C_diag = hypre_CSRMatrixAddDevice(alpha, A_diag, beta, B_diag); hypre_CSRMatrix *C_offd; //if (num_cols_offd_A || num_cols_offd_B) if (num_procs > 1) { hypre_ParCSRMatrixCopyColMapOffdToDevice(A); hypre_ParCSRMatrixCopyColMapOffdToDevice(B); HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A + num_cols_offd_B, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp, hypre_ParCSRMatrixDeviceColMapOffd(A), HYPRE_BigInt, num_cols_offd_A, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp + num_cols_offd_A, hypre_ParCSRMatrixDeviceColMapOffd(B), HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( sort, tmp, tmp + num_cols_offd_A + num_cols_offd_B ); HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique, tmp, tmp + num_cols_offd_A + num_cols_offd_B ); num_cols_offd_C = new_end - tmp; d_col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_C, tmp, HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); /* reuse memory of tmp */ HYPRE_Int *offd_A2C = (HYPRE_Int *) tmp; HYPRE_Int *offd_B2C = offd_A2C + num_cols_offd_A; HYPRE_THRUST_CALL( lower_bound, d_col_map_offd_C, d_col_map_offd_C + num_cols_offd_C, hypre_ParCSRMatrixDeviceColMapOffd(A), hypre_ParCSRMatrixDeviceColMapOffd(A) + num_cols_offd_A, offd_A2C ); HYPRE_THRUST_CALL( lower_bound, d_col_map_offd_C, d_col_map_offd_C + num_cols_offd_C, hypre_ParCSRMatrixDeviceColMapOffd(B), hypre_ParCSRMatrixDeviceColMapOffd(B) + num_cols_offd_B, offd_B2C ); HYPRE_Int *C_offd_i, *C_offd_j, nnzC_offd; HYPRE_Complex *C_offd_a; hypreDevice_CSRSpAdd( hypre_CSRMatrixNumRows(A_offd), hypre_CSRMatrixNumRows(B_offd), num_cols_offd_C, hypre_CSRMatrixNumNonzeros(A_offd), hypre_CSRMatrixNumNonzeros(B_offd), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), alpha, hypre_CSRMatrixData(A_offd), offd_A2C, hypre_CSRMatrixI(B_offd), hypre_CSRMatrixJ(B_offd), beta, hypre_CSRMatrixData(B_offd), offd_B2C, NULL, &nnzC_offd, &C_offd_i, 
&C_offd_j, &C_offd_a ); hypre_TFree(tmp, HYPRE_MEMORY_DEVICE); C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), num_cols_offd_C, nnzC_offd); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_CSRMatrixData(C_offd) = C_offd_a; hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_DEVICE; } else { C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), 0, 0); hypre_CSRMatrixInitialize_v2(C_offd, 0, HYPRE_MEMORY_DEVICE); } /* Create ParCSRMatrix C */ HYPRE_BigInt *row_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); HYPRE_BigInt *col_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_TMemcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_TMemcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *C = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), row_starts_C, col_starts_C, num_cols_offd_C, hypre_CSRMatrixNumNonzeros(C_diag), hypre_CSRMatrixNumNonzeros(C_offd)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); hypre_ParCSRMatrixDiag(C) = C_diag; hypre_ParCSRMatrixOffd(C) = C_offd; if (num_cols_offd_C) { hypre_ParCSRMatrixDeviceColMapOffd(C) = d_col_map_offd_C; hypre_ParCSRMatrixColMapOffd(C) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(C), d_col_map_offd_C, HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } hypre_ParCSRMatrixSetNumNonzeros(C); hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C); /* create CommPkg of C */ hypre_MatvecCommPkgCreate(C); *C_ptr = C; return hypre_error_flag; } #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /*-------------------------------------------------------------------------- * HYPRE_ParCSRDiagScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRDiagScale( HYPRE_ParCSRMatrix HA, HYPRE_ParVector Hy, HYPRE_ParVector Hx ) { hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA; hypre_ParVector *y = (hypre_ParVector *) Hy; hypre_ParVector *x = (hypre_ParVector *) Hx; HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x)); HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y)); HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A)); HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x)); HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, 0.0, x_data); //hypre_SyncCudaComputeStream(hypre_handle()); #else /* #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */ HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < local_size; i++) { x_data[i] = y_data[i] / A_data[A_i[i]]; } #endif /* #if defined(HYPRE_USING_CUDA) */ return ierr; }
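/* Illustrative host-side sketch, not part of the file above: several routines in
 * this file build a CSR row-pointer array by writing per-row nonzero counts into
 * rowptr[1..n] and then scanning, which is what the hypre_Memset +
 * hypreDevice_GetRowNnz + hypreDevice_IntegerInclusiveScan sequence does on the
 * device. */
static void rownnz_to_rowptr_sketch(HYPRE_Int n, const HYPRE_Int *rownnz, HYPRE_Int *rowptr)
{
   rowptr[0] = 0;                       /* plays the role of hypre_Memset(d_i, 0, ...) */
   for (HYPRE_Int i = 0; i < n; i++)    /* inclusive scan over the shifted counts */
   {
      rowptr[i + 1] = rowptr[i] + rownnz[i];
   }
   /* rowptr[n] now equals the total number of nonzeros */
}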
GB_unaryop__lnot_int64_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int64_int8 // op(A') function: GB_tran__lnot_int64_int8 // C type: int64_t // A type: int8_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ int64_t z = (int64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int64_int8 ( int64_t *Cx, // Cx and Ax may be aliased int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
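/* Expansion sketch, not part of the generated file above: for a single entry,
   GB_CAST_OP(p,p) amounts to the plain C below, i.e. logical-not applied to an
   int8 value and stored as int64. */
static inline void lnot_int64_int8_one (int64_t *Cx, const int8_t *Ax, int64_t p)
{
    int8_t  aij = Ax [p] ;              /* GB_GETA    */
    int64_t z   = (int64_t) aij ;       /* GB_CASTING */
    Cx [p] = !(z != 0) ;                /* GB_OP      */
}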
sum_cpu_omp_kernel.c
// // Created by Yonghong Yan on 1/7/16. // #include "sum.h" #include <homp.h> #ifdef USE_INTEL_MKL #include <mkl.h> #endif void sum_cpu_omp_wrapper(omp_offloading_t *off, long start_n, long length_n, REAL *x, REAL *y, REAL *result) { int num_omp_threads = off->dev->num_cores; long i; REAL sum = 0.0; #pragma omp parallel for simd shared(y, x, start_n, length_n) private(i) num_threads(num_omp_threads) reduction(+:sum) for (i=start_n; i<start_n + length_n; i++) { sum += y[i] * x[i]; // printf("x[%d]: %f, y[%d]: %f\n", i, x[i], i, y[i]); } *result = sum; }
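/* Minimal standalone sketch, not part of the file above: stripped of the homp
 * offloading context, the wrapper's "parallel for simd ... reduction(+:sum)"
 * loop is a plain dot product over the slice [start_n, start_n + length_n).
 * REAL is assumed to be double here. */
double dot_slice_sketch(long start_n, long length_n, const double *x, const double *y) {
    double sum = 0.0;
    long i;
#pragma omp parallel for simd reduction(+:sum)
    for (i = start_n; i < start_n + length_n; i++) {
        sum += y[i] * x[i];   /* each thread accumulates a private partial sum */
    }
    return sum;               /* partial sums combined by the reduction clause */
}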
3d7pt_var.c
/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>   /* needed for omp_get_max_threads() below */
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  // default problem size (including the halo) if sizes are not given on the
  // command line; previously these were read uninitialized when argc was small
  int Nx = 66, Ny = 66, Nz = 66, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // (start at index 0 so the halo planes read by the stencil are defined,
  //  and seed both time buffers since A[1]'s halo is read from t=1 onward)
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] =
                coef[0][i][j][k] * A[t%2][i  ][j  ][k  ]
              + coef[1][i][j][k] * A[t%2][i-1][j  ][k  ]
              + coef[2][i][j][k] * A[t%2][i  ][j-1][k  ]
              + coef[3][i][j][k] * A[t%2][i  ][j  ][k-1]
              + coef[4][i][j][k] * A[t%2][i+1][j  ][k  ]
              + coef[5][i][j][k] * A[t%2][i  ][j+1][k  ]
              + coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff); /* MIN macro; lowercase min was undefined */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
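The #pragma scop/endscop markers above only delimit the region for source-to-source tools such as PLUTO; as written, the timed loop nest runs serially. A minimal hand-parallelized sketch of the same update (same A/coef layout; the time loop stays sequential because step t+1 depends on step t):

/* Sketch: OpenMP-parallel variant of the timed loop nest above. */
void stencil_omp(int Nt, int Nz, int Ny, int Nx, double ****A, double ****coef)
{
  int t, i, j, k;
  for (t = 0; t < Nt-1; t++) {
#pragma omp parallel for private(i, j, k)
    for (i = 1; i < Nz-1; i++) {
      for (j = 1; j < Ny-1; j++) {
        for (k = 1; k < Nx-1; k++) {
          A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[t%2][i  ][j  ][k  ]
            + coef[1][i][j][k] * A[t%2][i-1][j  ][k  ]
            + coef[2][i][j][k] * A[t%2][i  ][j-1][k  ]
            + coef[3][i][j][k] * A[t%2][i  ][j  ][k-1]
            + coef[4][i][j][k] * A[t%2][i+1][j  ][k  ]
            + coef[5][i][j][k] * A[t%2][i  ][j+1][k  ]
            + coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
        }
      }
    }
  }
}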
automine_base.h
void automine_3motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters) { #pragma omp parallel { auto &counter = global_counters.at(omp_get_thread_num()); #pragma omp for schedule(dynamic,1) for(vidType v0 = 0; v0 < g.V(); v0++) { VertexSet y0 = g.N(v0); VertexSet y0f0 = bounded(y0,v0); for(vidType idx1 = 0; idx1 < y0.size(); idx1++) { vidType v1 = y0.begin()[idx1]; VertexSet y1 = g.N(v1); counter[0] += difference_num(y0, y1, v1); } for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) { vidType v1 = y0f0.begin()[idx1]; VertexSet y1 = g.N(v1); counter[1] += intersection_num(y0f0, y1, v1); } } } } void automine_4motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters) { #pragma omp parallel { auto &counter = global_counters.at(omp_get_thread_num()); #pragma omp for schedule(dynamic,1) for(vidType v0 = 0; v0 < g.V(); v0++) { VertexSet y0 = g.N(v0); VertexSet y0f0 = bounded(y0,v0); for(vidType idx1 = 0; idx1 < y0.size(); idx1++) { vidType v1 = y0.begin()[idx1]; VertexSet y1 = g.N(v1); VertexSet y0n1f1 = difference_set(y0, y1, v1); for(vidType idx2 = 0; idx2 < y0n1f1.size(); idx2++) { vidType v2 = y0n1f1.begin()[idx2]; VertexSet y2 = g.N(v2); counter[0] += difference_num(y0n1f1, y2, v2); } } for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) { vidType v1 = y0f0.begin()[idx1]; VertexSet y1 = g.N(v1); VertexSet y0y1 = intersection_set(y0, y1); VertexSet y0f0y1f1 = intersection_set(y0f0, y1, v1); VertexSet n0y1; difference_set(n0y1,y1, y0); VertexSet n0f0y1; difference_set(n0f0y1,y1, y0); VertexSet y0n1 = difference_set(y0, y1); VertexSet y0f0n1f1 = difference_set(y0f0, y1, v1); for(vidType idx2 = 0; idx2 < y0y1.size(); idx2++) { vidType v2 = y0y1.begin()[idx2]; VertexSet y2 = g.N(v2); counter[4] += difference_num(y0y1, y2, v2); VertexSet n0n1y2; counter[2] += difference_num(difference_set(n0n1y2,y2, y0), y1); } for(vidType idx2 = 0; idx2 < y0f0y1f1.size(); idx2++) { vidType v2 = y0f0y1f1.begin()[idx2]; VertexSet y2 = g.N(v2); counter[5] += intersection_num(y0f0y1f1, y2, v2); } for(vidType idx2 = 0; idx2 < y0n1.size(); idx2++) { vidType v2 = y0n1.begin()[idx2]; VertexSet y2 = g.N(v2); counter[1] += difference_num(n0y1, y2); } for(vidType idx2 = 0; idx2 < y0f0n1f1.size(); idx2++) { vidType v2 = y0f0n1f1.begin()[idx2]; VertexSet y2 = g.N(v2); counter[3] += intersection_num(n0f0y1, y2, v0); } } } } } void automine_5motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters) { #pragma omp parallel { auto &counter = global_counters.at(omp_get_thread_num()); #pragma omp for schedule(dynamic,1) for(vidType v0 = 0; v0 < g.V(); v0++) { VertexSet y0 = g.N(v0); VertexSet y0f0 = bounded(y0,v0); for(vidType idx1 = 0; idx1 < y0.size(); idx1++) { vidType v1 = y0.begin()[idx1]; VertexSet y1 = g.N(v1); VertexSet y0y1 = intersection_set(y0, y1); VertexSet n0y1; difference_set(n0y1,y1, y0); VertexSet n0y1f0 = bounded(n0y1,v0); VertexSet y0n1 = difference_set(y0, y1); VertexSet y0n1f1 = bounded(y0n1,v1); for(vidType idx2 = 0; idx2 < n0y1f0.size(); idx2++) { vidType v2 = n0y1f0.begin()[idx2]; VertexSet y2 = g.N(v2); VertexSet y0n1y2 = intersection_set(y0n1, y2); VertexSet y0n1y2f1 = bounded(y0n1y2,v1); VertexSet y0n1f1y2 = intersection_set(y0n1f1, y2); for(vidType idx3 = 0; idx3 < y0n1y2.size(); idx3++) { vidType v3 = y0n1y2.begin()[idx3]; VertexSet y3 = g.N(v3); VertexSet n0n1n2y3; counter[3] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2); } for(vidType idx3 = 0; idx3 < y0n1y2f1.size(); idx3++) { vidType v3 = y0n1y2f1.begin()[idx3]; VertexSet y3 = 
g.N(v3); counter[5] += difference_num(y0n1f1y2, y3, v3); } } for(vidType idx2 = 0; idx2 < y0n1.size(); idx2++) { vidType v2 = y0n1.begin()[idx2]; VertexSet y2 = g.N(v2); VertexSet n0y1n2 = difference_set(n0y1, y2); VertexSet y0n1n2f2 = difference_set(y0n1, y2, v2); for(vidType idx3 = 0; idx3 < y0n1n2f2.size(); idx3++) { vidType v3 = y0n1n2f2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[1] += difference_num(n0y1n2, y3); } } for(vidType idx2 = 0; idx2 < y0n1f1.size(); idx2++) { vidType v2 = y0n1f1.begin()[idx2]; VertexSet y2 = g.N(v2); VertexSet y0y1y2 = intersection_set(y0y1, y2); VertexSet n0n1y2; difference_set(n0n1y2,difference_set(n0n1y2,y2, y0), y1); VertexSet n0y1n2 = difference_set(n0y1, y2); VertexSet y0n1f1n2f2 = difference_set(y0n1f1, y2, v2); for(vidType idx3 = 0; idx3 < y0y1y2.size(); idx3++) { vidType v3 = y0y1y2.begin()[idx3]; VertexSet y3 = g.N(v3); VertexSet n0n1n2y3; counter[4] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2); } for(vidType idx3 = 0; idx3 < n0y1n2.size(); idx3++) { vidType v3 = n0y1n2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[8] += difference_num(n0n1y2, y3); } for(vidType idx3 = 0; idx3 < y0n1f1n2f2.size(); idx3++) { vidType v3 = y0n1f1n2f2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[0] += difference_num(y0n1f1n2f2, y3, v3); } } } for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) { vidType v1 = y0f0.begin()[idx1]; VertexSet y1 = g.N(v1); VertexSet y0y1 = intersection_set(y0, y1); VertexSet y0f0y1f1 = intersection_set(y0f0, y1, v1); VertexSet n0y1; difference_set(n0y1,y1, y0); VertexSet n0f0y1; difference_set(n0f0y1,y1, y0); VertexSet y0n1 = difference_set(y0, y1); VertexSet y0f0n1f1 = difference_set(y0f0, y1, v1); for(vidType idx2 = 0; idx2 < y0y1.size(); idx2++) { vidType v2 = y0y1.begin()[idx2]; VertexSet y2 = g.N(v2); VertexSet n0y1y2 = intersection_set(n0y1, y2); VertexSet n0f0y1y2 = intersection_set(n0f0y1, y2); VertexSet y0n1y2 = intersection_set(y0n1, y2); VertexSet y0f0n1y2f1 = intersection_set(y0f0n1f1, y2, v1); VertexSet y0y1n2 = difference_set(y0y1, y2); VertexSet y0y1n2f2 = bounded(y0y1n2,v2); VertexSet n0n1y2; difference_set(n0n1y2,difference_set(n0n1y2,y2, y0), y1); VertexSet n0n1y2f0 = bounded(n0n1y2,v0); VertexSet n0f0n1y2; difference_set(n0f0n1y2,difference_set(n0f0n1y2,y2, y0), y1); VertexSet n0y1n2 = difference_set(n0y1, y2); VertexSet y0n1n2 = difference_set(y0n1, y2); for(vidType idx3 = 0; idx3 < y0n1y2.size(); idx3++) { vidType v3 = y0n1y2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[13] += difference_num(n0y1y2, y3); } for(vidType idx3 = 0; idx3 < y0f0n1y2f1.size(); idx3++) { vidType v3 = y0f0n1y2f1.begin()[idx3]; VertexSet y3 = g.N(v3); counter[18] += intersection_num(n0f0y1y2, y3, v0); } for(vidType idx3 = 0; idx3 < y0y1n2.size(); idx3++) { vidType v3 = y0y1n2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[17] += intersection_num(y0y1n2, y3, v3); VertexSet n0n1n2y3; counter[10] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2); } for(vidType idx3 = 0; idx3 < y0y1n2f2.size(); idx3++) { vidType v3 = y0y1n2f2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[16] += intersection_num(n0n1y2, y3); counter[6] += difference_num(y0y1n2f2, y3, v3); } for(vidType idx3 = 0; idx3 < n0n1y2.size(); idx3++) { vidType v3 = n0n1y2.begin()[idx3]; VertexSet y3 = g.N(v3); VertexSet n0n1n2y3; counter[9] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2); counter[2] += difference_num(n0n1y2, y3, v3); } for(vidType idx3 = 0; idx3 < 
n0n1y2f0.size(); idx3++) { vidType v3 = n0n1y2f0.begin()[idx3]; VertexSet y3 = g.N(v3); counter[14] += intersection_num(n0f0n1y2, y3, v3); } for(vidType idx3 = 0; idx3 < y0n1n2.size(); idx3++) { vidType v3 = y0n1n2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[12] += intersection_num(n0y1n2, y3); counter[7] += difference_num(n0y1n2, y3); } } for(vidType idx2 = 0; idx2 < y0f0y1f1.size(); idx2++) { vidType v2 = y0f0y1f1.begin()[idx2]; VertexSet y2 = g.N(v2); VertexSet y0y1y2 = intersection_set(y0y1, y2); VertexSet y0f0y1f1y2f2 = intersection_set(y0f0y1f1, y2, v2); for(vidType idx3 = 0; idx3 < y0y1y2.size(); idx3++) { vidType v3 = y0y1y2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[19] += difference_num(y0y1y2, y3, v3); VertexSet n0n1n2y3; counter[15] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2); } for(vidType idx3 = 0; idx3 < y0f0y1f1y2f2.size(); idx3++) { vidType v3 = y0f0y1f1y2f2.begin()[idx3]; VertexSet y3 = g.N(v3); counter[20] += intersection_num(y0f0y1f1y2f2, y3, v3); } } for(vidType idx2 = 0; idx2 < y0f0n1f1.size(); idx2++) { vidType v2 = y0f0n1f1.begin()[idx2]; VertexSet y2 = g.N(v2); VertexSet n0f0n1y2; difference_set(n0f0n1y2,difference_set(n0f0n1y2,y2, y0), y1); VertexSet n0y1n2f0 = difference_set(n0f0y1, y2, v0); for(vidType idx3 = 0; idx3 < n0y1n2f0.size(); idx3++) { vidType v3 = n0y1n2f0.begin()[idx3]; VertexSet y3 = g.N(v3); counter[11] += intersection_num(n0f0n1y2, y3, v0); } } } } } } void automine_kmotif(Graph &g, unsigned k, std::vector<std::vector<uint64_t>> &counters) { std::cout << "Running AutoMine k-motif solver\n"; if (k == 3) { automine_3motif(g, counters); } else if (k == 4) { automine_4motif(g, counters); } else if (k == 5) { automine_5motif(g, counters); } else { std::cout << "Not implemented yet\n"; exit(0); } }
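In automine_3motif above, counter[1] is the triangle count: both neighbor sets are bounded to vertex ids below the current vertex, so each triangle is enumerated exactly once. The same idea on a plain CSR graph with sorted adjacency lists (illustrative types and names, not the Graph/VertexSet API used above):

#include <stdint.h>

uint64_t count_triangles(int n, const int *row_ptr, const int *col_idx)
{
  uint64_t total = 0;
#pragma omp parallel for schedule(dynamic,1) reduction(+:total)
  for (int v0 = 0; v0 < n; v0++) {
    for (int p = row_ptr[v0]; p < row_ptr[v0+1]; p++) {
      int v1 = col_idx[p];
      if (v1 >= v0) break;               /* keep only neighbors < v0 */
      /* merge-intersect N(v0) and N(v1), both bounded by v1 */
      int a = row_ptr[v0], b = row_ptr[v1];
      while (a < row_ptr[v0+1] && b < row_ptr[v1+1]) {
        int x = col_idx[a], y = col_idx[b];
        if (x >= v1 || y >= v1) break;   /* enforce the v1 bound */
        if (x == y) { total++; a++; b++; }
        else if (x < y) a++;
        else b++;
      }
    }
  }
  return total;
}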
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % ImageMagick Image Resize Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2007 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/draw.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/pixel-private.h" #include "magick/monitor.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resize.h" #include "magick/string_.h" #include "magick/utility.h" #include "magick/version.h" /* Typedef declarations. */ typedef struct _ContributionInfo { MagickRealType weight; long pixel; } ContributionInfo; typedef struct _FilterInfo { MagickRealType (*function)(const MagickRealType,const MagickRealType), support; } FilterInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveResizeImage() adaptively resize image with pixel resampling. % % The format of the AdaptiveResizeImage method is: % % Image *AdaptiveResizeImage(const Image *image, % const unsigned long columns,const unsigned long rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o columns: The number of columns in the resized image. % % o rows: The number of rows in the resized image. % % o exception: Return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveResizeImage(const Image *image, const unsigned long columns,const unsigned long rows,ExceptionInfo *exception) { #define AdaptiveResizeImageTag "Resize/Image" Image *resize_image; long y; MagickPixelPacket pixel; PointInfo offset; register IndexPacket *resize_indexes; register long x; register PixelPacket *q; ResampleFilter *resample_filter; ViewInfo *resize_view; /* Resize image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse) { InheritException(exception,&resize_image->exception); resize_image=DestroyImage(resize_image); return((Image *) NULL); } GetMagickPixelPacket(image,&pixel); resample_filter=AcquireResampleFilter(image,exception); resize_view=OpenCacheView(resize_image); for (y=0; y < (long) resize_image->rows; y++) { q=SetCacheView(resize_view,0,y,resize_image->columns,1); if (q == (PixelPacket *) NULL) break; resize_indexes=GetIndexes(resize_image); offset.y=(MagickRealType) (y*image->rows/resize_image->rows); for (x=0; x < (long) resize_image->columns; x++) { offset.x=(MagickRealType) (x*image->columns/resize_image->columns); pixel=ResamplePixelColor(resample_filter,offset.x-0.5,offset.y-0.5); SetPixelPacket(resize_image,&pixel,q,resize_indexes+x); q++; } if (SyncCacheView(resize_view) == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); resize_view=CloseCacheView(resize_view); return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + B e s s e l O r d e r O n e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BesselOrderOne() computes the Bessel function of x of the first kind of % order 1: % % Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8] % % j1(x) = x*(p(x)/q(x)) for the polynomial approximations p and q below; % % For x in (8,inf) % % j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1)) % % where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follows: % % cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4) % = 1/sqrt(2) * (sin(x) - cos(x)) % sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4) % = -1/sqrt(2) * (sin(x) + cos(x)) % % The format of the BesselOrderOne method is: % % MagickRealType BesselOrderOne(MagickRealType x) % % A description of each parameter follows: % % o value: Method BesselOrderOne returns the Bessel function of x of the % first kind of order 1. % % o x: MagickRealType value.
% */ static MagickRealType J1(MagickRealType x) { MagickRealType p, q; register long i; static const double Pone[] = { 0.581199354001606143928050809e+21, -0.6672106568924916298020941484e+20, 0.2316433580634002297931815435e+19, -0.3588817569910106050743641413e+17, 0.2908795263834775409737601689e+15, -0.1322983480332126453125473247e+13, 0.3413234182301700539091292655e+10, -0.4695753530642995859767162166e+7, 0.270112271089232341485679099e+4 }, Qone[] = { 0.11623987080032122878585294e+22, 0.1185770712190320999837113348e+20, 0.6092061398917521746105196863e+17, 0.2081661221307607351240184229e+15, 0.5243710262167649715406728642e+12, 0.1013863514358673989967045588e+10, 0.1501793594998585505921097578e+7, 0.1606931573481487801970916749e+4, 0.1e+1 }; p=Pone[8]; q=Qone[8]; for (i=7; i >= 0; i--) { p=p*x*x+Pone[i]; q=q*x*x+Qone[i]; } return(p/q); } static MagickRealType P1(MagickRealType x) { MagickRealType p, q; register long i; static const double Pone[] = { 0.352246649133679798341724373e+5, 0.62758845247161281269005675e+5, 0.313539631109159574238669888e+5, 0.49854832060594338434500455e+4, 0.2111529182853962382105718e+3, 0.12571716929145341558495e+1 }, Qone[] = { 0.352246649133679798068390431e+5, 0.626943469593560511888833731e+5, 0.312404063819041039923015703e+5, 0.4930396490181088979386097e+4, 0.2030775189134759322293574e+3, 0.1e+1 }; p=Pone[5]; q=Qone[5]; for (i=4; i >= 0; i--) { p=p*(8.0/x)*(8.0/x)+Pone[i]; q=q*(8.0/x)*(8.0/x)+Qone[i]; } return(p/q); } static MagickRealType Q1(MagickRealType x) { MagickRealType p, q; register long i; static const double Pone[] = { 0.3511751914303552822533318e+3, 0.7210391804904475039280863e+3, 0.4259873011654442389886993e+3, 0.831898957673850827325226e+2, 0.45681716295512267064405e+1, 0.3532840052740123642735e-1 }, Qone[] = { 0.74917374171809127714519505e+4, 0.154141773392650970499848051e+5, 0.91522317015169922705904727e+4, 0.18111867005523513506724158e+4, 0.1038187585462133728776636e+3, 0.1e+1 }; p=Pone[5]; q=Qone[5]; for (i=4; i >= 0; i--) { p=p*(8.0/x)*(8.0/x)+Pone[i]; q=q*(8.0/x)*(8.0/x)+Qone[i]; } return(p/q); } static MagickRealType BesselOrderOne(MagickRealType x) { MagickRealType p, q; if (x == 0.0) return(0.0); p=x; if (x < 0.0) x=(-x); if (x < 8.0) return(p*J1(x)); q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)- cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+ cos((double) x)))); if (p < 0.0) q=(-q); return(q); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagnifyImage() is a convenience method that scales an image proportionally % to twice its size. % % The format of the MagnifyImage method is: % % Image *MagnifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o exception: Return any errors or warnings in this structure. 
% */ MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception) { Image *magnify_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); magnify_image=ResizeImage(image,2*image->columns,2*image->rows,CubicFilter, 1.0,exception); return(magnify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinifyImage() is a convenience method that scales an image proportionally % to half its size. % % The format of the MinifyImage method is: % % Image *MinifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o exception: Return any errors or warnings in this structure. % */ MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception) { Image *minify_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); minify_image=ResizeImage(image,image->columns/2,image->rows/2,CubicFilter, 1.0,exception); return(minify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResampleImage() resamples an image to the desired resolution. % % Bessel Blackman Box % Catrom Cubic Gaussian % Hanning Hermite Lanczos % Mitchell Point Quadratic % Sinc Triangle % % Most of the filters are FIR (finite impulse response), however, Bessel, % Gaussian, and Sinc are IIR (infinite impulse response). Bessel and Sinc % are windowed (brought down to zero) with the Blackman filter. % % The format of the ResampleImage method is: % % Image *ResampleImage(const Image *image,const double x_resolution, % const double y_resolution,const FilterTypes filter,const double blur, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o x_resolution: The new image x resolution. % % o y_resolution: The new image y resolution. % % o filter: Image filter to use. % % o blur: The blur factor where > 1 is blurry, < 1 is sharp. % % o exception: Return any errors or warnings in this structure. % */ MagickExport Image *ResampleImage(const Image *image,const double x_resolution, const double y_resolution,const FilterTypes filter,const double blur, ExceptionInfo *exception) { #define ResampleImageTag "Resample/Image" Image *resample_image; unsigned long height, width; /* Initialize sampled image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=(unsigned long) (x_resolution*image->columns/ (image->x_resolution == 0.0 ? 72.0 : image->x_resolution)+0.5); height=(unsigned long) (y_resolution*image->rows/ (image->y_resolution == 0.0 ? 72.0 : image->y_resolution)+0.5); resample_image=ResizeImage(image,width,height,filter,blur,exception); if (resample_image != (Image *) NULL) { resample_image->x_resolution=x_resolution; resample_image->y_resolution=y_resolution; } return(resample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResizeImage() scales an image to the desired dimensions with one of these % filters: % % Bessel Blackman Box % Catrom Cubic Gaussian % Hanning Hermite Lanczos % Mitchell Point Quadratic % Sinc Triangle % % Most of the filters are FIR (finite impulse response), however, Bessel, % Gaussian, and Sinc are IIR (infinite impulse response). Bessel and Sinc % are windowed (brought down to zero) with the Blackman filter. % % ResizeImage() was inspired by Paul Heckbert's zoom program. % % The format of the ResizeImage method is: % % Image *ResizeImage(const Image *image,const unsigned long columns, % const unsigned long rows,const FilterTypes filter,const double blur, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o columns: The number of columns in the scaled image. % % o rows: The number of rows in the scaled image. % % o filter: Image filter to use. % % o blur: The blur factor where > 1 is blurry, < 1 is sharp. % % o exception: Return any errors or warnings in this structure. % */ static MagickRealType Bessel(const MagickRealType x, const MagickRealType magick_unused(support)) { if (x == 0.0) return((MagickRealType) (MagickPI/4.0)); return(BesselOrderOne(MagickPI*x)/(2.0*x)); } static MagickRealType Sinc(const MagickRealType x, const MagickRealType magick_unused(support)) { if (x == 0.0) return(1.0); return(sin(MagickPI*(double) x)/(MagickPI*(double) x)); } static MagickRealType Blackman(const MagickRealType x, const MagickRealType magick_unused(support)) { return(0.42+0.5*cos(MagickPI*(double) x)+0.08*cos(2*MagickPI*(double) x)); } static MagickRealType BlackmanBessel(const MagickRealType x, const MagickRealType support) { return(Blackman(x/support,support)*Bessel(x,support)); } static MagickRealType BlackmanSinc(const MagickRealType x, const MagickRealType support) { return(Blackman(x/support,support)*Sinc(x,support)); } static MagickRealType Box(const MagickRealType x, const MagickRealType magick_unused(support)) { if (x < -0.5) return(0.0); if (x < 0.5) return(1.0); return(0.0); } static MagickRealType Catrom(const MagickRealType x, const MagickRealType magick_unused(support)) { if (x < -2.0) return(0.0); if (x < -1.0) return(0.5*(4.0+x*(8.0+x*(5.0+x)))); if (x < 0.0) return(0.5*(2.0+x*x*(-5.0-3.0*x))); if (x < 1.0) return(0.5*(2.0+x*x*(-5.0+3.0*x))); if (x < 2.0) return(0.5*(4.0+x*(-8.0+x*(5.0-x)))); return(0.0); } static MagickRealType Cubic(const MagickRealType x, const MagickRealType magick_unused(support)) { if (x < -2.0) return(0.0); if (x < -1.0) return((2.0+x)*(2.0+x)*(2.0+x)/6.0); if (x < 0.0) return((4.0+x*x*(-6.0-3.0*x))/6.0); if (x < 1.0) return((4.0+x*x*(-6.0+3.0*x))/6.0); if (x < 2.0) return((2.0-x)*(2.0-x)*(2.0-x)/6.0); return(0.0); } static MagickRealType Gaussian(const MagickRealType x, const MagickRealType magick_unused(support)) { return(exp((double) (-2.0*x*x))*sqrt(2.0/MagickPI)); } static MagickRealType Hanning(const MagickRealType x, const MagickRealType magick_unused(support)) { return(0.5+0.5*cos(MagickPI*(double) x)); } static MagickRealType
Hamming(const MagickRealType x, const MagickRealType magick_unused(support)) { return(0.54+0.46*cos(MagickPI*(double) x)); } static MagickRealType Hermite(const MagickRealType x, const MagickRealType magick_unused(support)) { if (x < -1.0) return(0.0); if (x < 0.0) return((2.0*(-x)-3.0)*(-x)*(-x)+1.0); if (x < 1.0) return((2.0*x-3.0)*x*x+1.0); return(0.0); } static MagickRealType Lanczos(const MagickRealType x, const MagickRealType support) { if (x < -3.0) return(0.0); if (x < 0.0) return(Sinc(-x,support)*Sinc(-x/3.0,support)); if (x < 3.0) return(Sinc(x,support)*Sinc(x/3.0,support)); return(0.0); } static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickRealType Mitchell(const MagickRealType x, const MagickRealType magick_unused(support)) { #define B (1.0/3.0) #define C (1.0/3.0) #define P0 (( 6.0- 2.0*B )/6.0) #define P2 ((-18.0+12.0*B+ 6.0*C)/6.0) #define P3 (( 12.0- 9.0*B- 6.0*C)/6.0) #define Q0 (( 8.0*B+24.0*C)/6.0) #define Q1 (( -12.0*B-48.0*C)/6.0) #define Q2 (( 6.0*B+30.0*C)/6.0) #define Q3 (( - 1.0*B- 6.0*C)/6.0) if (x < -2.0) return(0.0); if (x < -1.0) return(Q0-x*(Q1-x*(Q2-x*Q3))); if (x < 0.0) return(P0+x*x*(P2-x*P3)); if (x < 1.0) return(P0+x*x*(P2+x*P3)); if (x < 2.0) return(Q0+x*(Q1+x*(Q2+x*Q3))); return(0.0); } static MagickRealType Quadratic(const MagickRealType x, const MagickRealType magick_unused(support)) { if (x < -1.5) return(0.0); if (x < -0.5) return(0.5*(x+1.5)*(x+1.5)); if (x < 0.5) return(0.75-x*x); if (x < 1.5) return(0.5*(x-1.5)*(x-1.5)); return(0.0); } static MagickRealType Triangle(const MagickRealType x, const MagickRealType magick_unused(support)) { if (x < -1.0) return(0.0); if (x < 0.0) return(1.0+x); if (x < 1.0) return(1.0-x); return(0.0); } static MagickBooleanType HorizontalFilter(const Image *image, Image *resize,const MagickRealType x_factor, const FilterInfo *filter_info,const MagickRealType blur, ContributionInfo *contribution,const MagickSizeType span, MagickOffsetType *quantum,ExceptionInfo *exception) { #define ResizeImageTag "Resize/Image" long j, n, start, stop, x; MagickBooleanType status; MagickPixelPacket pixel, zero; MagickRealType alpha, center, density, gamma, scale, support; register const IndexPacket *indexes; register const PixelPacket *pixels; register IndexPacket *resize_indexes; register long i, y; register PixelPacket *resize_pixels; /* Apply filter to resize horizontally from image to resize. */ scale=blur*MagickMax(1.0/x_factor,1.0); support=scale*filter_info->support; resize->storage_class=image->storage_class; if (support > 0.5) { if (SetImageStorageClass(resize,DirectClass) == MagickFalse) { InheritException(exception,&resize->exception); return(MagickFalse); } } else { /* Reduce to point sampling. */ support=(MagickRealType) (0.5+MagickEpsilon); scale=1.0; } scale=1.0/scale; (void) ResetMagickMemory(&zero,0,sizeof(zero)); for (x=0; x < (long) resize->columns; x++) { center=(MagickRealType) (x+0.5)/x_factor; start=(long) (MagickMax(center-support,0.0)+0.5); stop=(long) (MagickMin(center+support,(double) image->columns)+0.5); density=0.0; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=filter_info->function(scale*((MagickRealType) (start+n)-center+0.5),filter_info->support); density+=contribution[n].weight; } if ((density != 0.0) && (density != 1.0)) { /* Normalize. 
*/ density=1.0/density; for (i=0; i < n; i++) contribution[i].weight*=density; } pixels=AcquireImagePixels(image,contribution[0].pixel,0,(unsigned long) (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception); resize_pixels=SetImagePixels(resize,x,0,1,resize->rows); if ((pixels == (const PixelPacket *) NULL) || (resize_pixels == (PixelPacket *) NULL)) break; indexes=AcquireIndexes(image); resize_indexes=GetIndexes(resize); #pragma omp parallel for private(alpha, gamma, i, j, pixel) for (y=0; y < (long) resize->rows; y++) { pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight; pixel.red+=alpha*(pixels+j)->red; pixel.green+=alpha*(pixels+j)->green; pixel.blue+=alpha*(pixels+j)->blue; pixel.opacity+=alpha*(pixels+j)->opacity; } resize_pixels[y].red=RoundToQuantum(pixel.red); resize_pixels[y].green=RoundToQuantum(pixel.green); resize_pixels[y].blue=RoundToQuantum(pixel.blue); resize_pixels[y].opacity=RoundToQuantum(pixel.opacity); } else { gamma=0.0; for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight*QuantumScale*((MagickRealType) QuantumRange-(pixels+j)->opacity); pixel.red+=alpha*(pixels+j)->red; pixel.green+=alpha*(pixels+j)->green; pixel.blue+=alpha*(pixels+j)->blue; pixel.opacity+=contribution[i].weight*(pixels+j)->opacity; gamma+=alpha; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); resize_pixels[y].red=RoundToQuantum(gamma*pixel.red); resize_pixels[y].green=RoundToQuantum(gamma*pixel.green); resize_pixels[y].blue=RoundToQuantum(gamma*pixel.blue); resize_pixels[y].opacity=RoundToQuantum(pixel.opacity); } if ((image->colorspace == CMYKColorspace) && (resize->colorspace == CMYKColorspace)) { if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight; pixel.index+=alpha*indexes[j]; } resize_indexes[y]=(IndexPacket) RoundToQuantum(pixel.index); } else { gamma=0.0; for (i=0; i < n; i++) { j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i].pixel-contribution[0].pixel); alpha=contribution[i].weight*QuantumScale*((MagickRealType) QuantumRange-(pixels+j)->opacity); pixel.index+=alpha*indexes[j]; gamma+=alpha; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); resize_indexes[y]=(IndexPacket) RoundToQuantum(gamma*pixel.index); } } if ((resize->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(long) (MagickMin(MagickMax(center,(double) start),(double) stop- 1.0)+0.5); j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+ (contribution[i-start].pixel-contribution[0].pixel); resize_indexes[y]=indexes[j]; } } if (SyncImagePixels(resize) == MagickFalse) break; if ((image->progress_monitor != (MagickProgressMonitor) NULL) && (QuantumTick(*quantum,span) != MagickFalse)) { status=image->progress_monitor(ResizeImageTag,(MagickOffsetType) *quantum,span,image->client_data); if (status == MagickFalse) break; } (*quantum)++; } return(x == (long) resize->columns ? 
MagickTrue : MagickFalse); } static MagickBooleanType VerticalFilter(const Image *image,Image *resize, const MagickRealType y_factor,const FilterInfo *filter_info, const MagickRealType blur,ContributionInfo *contribution, const MagickSizeType span,MagickOffsetType *quantum,ExceptionInfo *exception) { long j, n, start, stop, y; MagickBooleanType status; MagickPixelPacket pixel, zero; MagickRealType alpha, center, density, gamma, scale, support; register const IndexPacket *indexes; register const PixelPacket *pixels; register IndexPacket *resize_indexes; register long i, x; register PixelPacket *resize_pixels; /* Apply filter to resize vertically from image to resize. */ scale=blur*MagickMax(1.0/y_factor,1.0); support=scale*filter_info->support; resize->storage_class=image->storage_class; if (support > 0.5) { if (SetImageStorageClass(resize,DirectClass) == MagickFalse) { InheritException(exception,&resize->exception); return(MagickFalse); } } else { /* Reduce to point sampling. */ support=(MagickRealType) (0.5+MagickEpsilon); scale=1.0; } scale=1.0/scale; (void) ResetMagickMemory(&zero,0,sizeof(zero)); for (y=0; y < (long) resize->rows; y++) { center=(MagickRealType) (y+0.5)/y_factor; start=(long) (MagickMax(center-support,0.0)+0.5); stop=(long) (MagickMin(center+support,(double) image->rows)+0.5); density=0.0; for (n=0; n < (stop-start); n++) { contribution[n].pixel=start+n; contribution[n].weight=filter_info->function(scale* ((MagickRealType) (start+n)-center+0.5),filter_info->support); density+=contribution[n].weight; } if ((density != 0.0) && (density != 1.0)) { /* Normalize. */ density=1.0/density; for (i=0; i < n; i++) contribution[i].weight*=density; } pixels=AcquireImagePixels(image,0,contribution[0].pixel,image->columns, (unsigned long) (contribution[n-1].pixel-contribution[0].pixel+1), exception); resize_pixels=SetImagePixels(resize,0,y,resize->columns,1); if ((pixels == (const PixelPacket *) NULL) || (resize_pixels == (PixelPacket *) NULL)) break; indexes=AcquireIndexes(image); resize_indexes=GetIndexes(resize); #pragma omp parallel for private(alpha, gamma, i, j, pixel) for (x=0; x < (long) resize->columns; x++) { gamma=0.0; pixel=zero; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=(long) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight; pixel.red+=alpha*(pixels+j)->red; pixel.green+=alpha*(pixels+j)->green; pixel.blue+=alpha*(pixels+j)->blue; pixel.opacity+=alpha*(pixels+j)->opacity; } resize_pixels[x].red=RoundToQuantum(pixel.red); resize_pixels[x].green=RoundToQuantum(pixel.green); resize_pixels[x].blue=RoundToQuantum(pixel.blue); resize_pixels[x].opacity=RoundToQuantum(pixel.opacity); } else { for (i=0; i < n; i++) { j=(long) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight*QuantumScale*((MagickRealType) QuantumRange-(pixels+j)->opacity); pixel.red+=alpha*(pixels+j)->red; pixel.green+=alpha*(pixels+j)->green; pixel.blue+=alpha*(pixels+j)->blue; pixel.opacity+=contribution[i].weight*(pixels+j)->opacity; gamma+=alpha; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 
1.0 : gamma); resize_pixels[x].red=RoundToQuantum(gamma*pixel.red); resize_pixels[x].green=RoundToQuantum(gamma*pixel.green); resize_pixels[x].blue=RoundToQuantum(gamma*pixel.blue); resize_pixels[x].opacity=RoundToQuantum(pixel.opacity); } if ((image->colorspace == CMYKColorspace) && (resize->colorspace == CMYKColorspace)) { gamma=0.0; if (image->matte == MagickFalse) { for (i=0; i < n; i++) { j=(long) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight; pixel.index+=alpha*indexes[j]; } resize_indexes[x]=(IndexPacket) RoundToQuantum(pixel.index); } else { for (i=0; i < n; i++) { j=(long) ((contribution[i].pixel-contribution[0].pixel)* image->columns+x); alpha=contribution[i].weight*QuantumScale*((MagickRealType) QuantumRange-(pixels+j)->opacity); pixel.index+=alpha*indexes[j]; gamma+=alpha; } gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma); resize_indexes[x]=(IndexPacket) RoundToQuantum(gamma*pixel.index); } } if ((resize->storage_class == PseudoClass) && (image->storage_class == PseudoClass)) { i=(long) (MagickMin(MagickMax(center,(double) start),(double) stop- 1.0)+0.5); j=(long) ((contribution[i-start].pixel-contribution[0].pixel)* image->columns+x); resize_indexes[x]=indexes[j]; } } if (SyncImagePixels(resize) == MagickFalse) break; if ((image->progress_monitor != (MagickProgressMonitor) NULL) && (QuantumTick(*quantum,span) != MagickFalse)) { status=image->progress_monitor(ResizeImageTag,(MagickOffsetType) *quantum,span,image->client_data); if (status == MagickFalse) break; } (*quantum)++; } return(y == (long) resize->rows ? MagickTrue : MagickFalse); } MagickExport Image *ResizeImage(const Image *image,const unsigned long columns, const unsigned long rows,const FilterTypes filter,const double blur, ExceptionInfo *exception) { ContributionInfo *contribution; Image *filter_image, *resize_image; MagickRealType support, x_factor, x_support, y_factor, y_support; MagickSizeType span; MagickStatusType status; register long i; static const FilterInfo filters[SincFilter+1] = { { Box, 0.0f }, { Box, 0.0f }, { Box, 0.5f }, { Triangle, 1.0f }, { Hermite, 1.0f }, { Hanning, 1.0f }, { Hamming, 1.0f }, { Blackman, 1.0f }, { Gaussian, 1.25f }, { Quadratic, 1.5f }, { Cubic, 2.0f }, { Catrom, 2.0f }, { Mitchell, 2.0f }, { Lanczos, 3.0f }, { BlackmanBessel, 3.2383f }, { BlackmanSinc, 4.0f } }; MagickOffsetType quantum; /* Initialize resize image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); assert(((int) filter >= 0) && ((int) filter <= SincFilter)); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows) && (filter == UndefinedFilter) && (blur == 1.0)) return(CloneImage(image,0,0,MagickTrue,exception)); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) return((Image *) NULL); /* Allocate filter contribution info. 
*/ x_factor=(MagickRealType) resize_image->columns/(MagickRealType) image->columns; y_factor=(MagickRealType) resize_image->rows/(MagickRealType) image->rows; i=(long) LanczosFilter; if (filter != UndefinedFilter) i=(long) filter; else if ((x_factor == 1.0) && (y_factor == 1.0)) i=(long) PointFilter; else if ((image->storage_class == PseudoClass) || (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0)) i=(long) MitchellFilter; x_support=blur*MagickMax(1.0/x_factor,1.0)*filters[i].support; y_support=blur*MagickMax(1.0/y_factor,1.0)*filters[i].support; support=MagickMax(x_support,y_support); if (support < filters[i].support) support=filters[i].support; contribution=(ContributionInfo *) AcquireQuantumMemory((size_t) (2.0*MagickMax(support,0.5)+3.0),sizeof(*contribution)); if (contribution == (ContributionInfo *) NULL) { resize_image=DestroyImage(resize_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Resize image. */ quantum=0; if ((columns*((MagickSizeType) image->rows+rows)) > (rows*((MagickSizeType) image->columns+columns))) { filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { contribution=(ContributionInfo *) RelinquishMagickMemory( contribution); resize_image=DestroyImage(resize_image); return((Image *) NULL); } span=(MagickSizeType) (filter_image->columns+resize_image->rows); status=HorizontalFilter(image,filter_image,x_factor,&filters[i],blur, contribution,span,&quantum,exception); status|=VerticalFilter(filter_image,resize_image,y_factor,&filters[i],blur, contribution,span,&quantum,exception); } else { filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception); if (filter_image == (Image *) NULL) { contribution=(ContributionInfo *) RelinquishMagickMemory( contribution); resize_image=DestroyImage(resize_image); return((Image *) NULL); } span=(MagickSizeType) (resize_image->columns+filter_image->rows); status=VerticalFilter(image,filter_image,y_factor,&filters[i],blur, contribution,span,&quantum,exception); status|=HorizontalFilter(filter_image,resize_image,x_factor,&filters[i], blur,contribution,span,&quantum,exception); } /* Free allocated memory. */ contribution=(ContributionInfo *) RelinquishMagickMemory(contribution); filter_image=DestroyImage(filter_image); if (status == MagickFalse) { resize_image=DestroyImage(resize_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SampleImage() scales an image to the desired dimensions with pixel % sampling. Unlike other scaling methods, this method does not introduce % any additional color into the scaled image. % % The format of the SampleImage method is: % % Image *SampleImage(const Image *image,const unsigned long columns, % const unsigned long rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o columns: The number of columns in the sampled image. % % o rows: The number of rows in the sampled image. % % o exception: Return any errors or warnings in this structure. 
% */ MagickExport Image *SampleImage(const Image *image,const unsigned long columns, const unsigned long rows,ExceptionInfo *exception) { #define SampleImageTag "Sample/Image" Image *sample_image; long j, *x_offset, y, *y_offset; MagickBooleanType status; register const IndexPacket *indexes; register const PixelPacket *pixels; register IndexPacket *sample_indexes; register long x; register PixelPacket *sample_pixels; /* Initialize sampled image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Allocate scan line buffer and column offset buffers. */ x_offset=(long *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); y_offset=(long *) AcquireQuantumMemory((size_t) sample_image->rows, sizeof(*y_offset)); if ((x_offset == (long *) NULL) || (y_offset == (long *) NULL)) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize pixel offsets. */ for (x=0; x < (long) sample_image->columns; x++) x_offset[x]=(long) (((MagickRealType) x+0.5)*image->columns/ sample_image->columns); for (y=0; y < (long) sample_image->rows; y++) y_offset[y]=(long) (((MagickRealType) y+0.5)*image->rows/ sample_image->rows); /* Sample each row. */ j=(-1); pixels=AcquireImagePixels(image,0,0,image->columns,1,exception); indexes=AcquireIndexes(image); for (y=0; y < (long) sample_image->rows; y++) { sample_pixels=SetImagePixels(sample_image,0,y,sample_image->columns,1); if (sample_pixels == (PixelPacket *) NULL) break; sample_indexes=GetIndexes(sample_image); if (j != y_offset[y]) { /* Read a scan line. */ j=y_offset[y]; pixels=AcquireImagePixels(image,0,j,image->columns,1,exception); if (pixels == (const PixelPacket *) NULL) break; indexes=AcquireIndexes(image); } /* Sample each column. */ for (x=0; x < (long) sample_image->columns; x++) sample_pixels[x]=pixels[x_offset[x]]; if ((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) for (x=0; x < (long) sample_image->columns; x++) sample_indexes[x]=indexes[x_offset[x]]; if (SyncImagePixels(sample_image) == MagickFalse) break; if ((image->progress_monitor != (MagickProgressMonitor) NULL) && (QuantumTick(y,image->rows) != MagickFalse)) { status=image->progress_monitor(SampleImageTag,y,image->rows, image->client_data); if (status == MagickFalse) break; } } y_offset=(long *) RelinquishMagickMemory(y_offset); x_offset=(long *) RelinquishMagickMemory(x_offset); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const unsigned long columns, % const unsigned long rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. 
% % o columns: The number of columns in the scaled image. % % o rows: The number of rows in the scaled image. % % o exception: Return any errors or warnings in this structure. % */ MagickExport Image *ScaleImage(const Image *image,const unsigned long columns, const unsigned long rows,ExceptionInfo *exception) { #define ScaleImageTag "Scale/Image" Image *scale_image; long number_rows, y; MagickBooleanType next_column, next_row, status; MagickPixelPacket pixel, *scale_scanline, *scanline, *x_vector, *y_vector, zero; PointInfo scale, span; register const IndexPacket *indexes; register const PixelPacket *p; register IndexPacket *scale_indexes; register long i, x; register MagickPixelPacket *s, *t; register PixelPacket *q; /* Initialize scaled image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); scale_image=CloneImage(image,columns,rows,MagickTrue,exception); if (scale_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse) { InheritException(exception,&scale_image->exception); scale_image=DestroyImage(scale_image); return((Image *) NULL); } /* Allocate memory. */ x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*x_vector)); scanline=x_vector; if (image->rows != scale_image->rows) scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*scanline)); scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) scale_image->columns,sizeof(*scale_scanline)); y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns, sizeof(*y_vector)); if ((scanline == (MagickPixelPacket *) NULL) || (scale_scanline == (MagickPixelPacket *) NULL) || (x_vector == (MagickPixelPacket *) NULL) || (y_vector == (MagickPixelPacket *) NULL)) { scale_image=DestroyImage(scale_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Scale image. */ number_rows=0; next_row=MagickTrue; span.y=1.0; scale.y=(double) scale_image->rows/(double) image->rows; (void) ResetMagickMemory(y_vector,0,(size_t) image->columns* sizeof(*y_vector)); GetMagickPixelPacket(image,&pixel); (void) ResetMagickMemory(&zero,0,sizeof(zero)); i=0; for (y=0; y < (long) scale_image->rows; y++) { q=SetImagePixels(scale_image,0,y,scale_image->columns,1); if (q == (PixelPacket *) NULL) break; scale_indexes=GetIndexes(scale_image); if (scale_image->rows == image->rows) { /* Read a new scanline. */ p=AcquireImagePixels(image,0,i++,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=AcquireIndexes(image); for (x=0; x < (long) image->columns; x++) { x_vector[x].red=(MagickRealType) p->red; x_vector[x].green=(MagickRealType) p->green; x_vector[x].blue=(MagickRealType) p->blue; if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) p->opacity; if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) indexes[x]; p++; } } else { /* Scale Y direction. */ while (scale.y < span.y) { if ((next_row != MagickFalse) && (number_rows < (long) image->rows)) { /* Read a new scanline. 
*/ p=AcquireImagePixels(image,0,i++,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=AcquireIndexes(image); for (x=0; x < (long) image->columns; x++) { x_vector[x].red=(MagickRealType) p->red; x_vector[x].green=(MagickRealType) p->green; x_vector[x].blue=(MagickRealType) p->blue; if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) p->opacity; if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) indexes[x]; p++; } number_rows++; } for (x=0; x < (long) image->columns; x++) { y_vector[x].red+=scale.y*x_vector[x].red; y_vector[x].green+=scale.y*x_vector[x].green; y_vector[x].blue+=scale.y*x_vector[x].blue; if (scale_image->matte != MagickFalse) y_vector[x].opacity+=scale.y*x_vector[x].opacity; if (scale_indexes != (IndexPacket *) NULL) y_vector[x].index+=scale.y*x_vector[x].index; } span.y-=scale.y; scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } if ((next_row != MagickFalse) && (number_rows < (long) image->rows)) { /* Read a new scanline. */ p=AcquireImagePixels(image,0,i++,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=AcquireIndexes(image); for (x=0; x < (long) image->columns; x++) { x_vector[x].red=(MagickRealType) p->red; x_vector[x].green=(MagickRealType) p->green; x_vector[x].blue=(MagickRealType) p->blue; if (image->matte != MagickFalse) x_vector[x].opacity=(MagickRealType) p->opacity; if (indexes != (IndexPacket *) NULL) x_vector[x].index=(MagickRealType) indexes[x]; p++; } number_rows++; next_row=MagickFalse; } s=scanline; for (x=0; x < (long) image->columns; x++) { pixel.red=y_vector[x].red+span.y*x_vector[x].red; pixel.green=y_vector[x].green+span.y*x_vector[x].green; pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue; if (image->matte != MagickFalse) pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index=y_vector[x].index+span.y*x_vector[x].index; s->red=pixel.red; s->green=pixel.green; s->blue=pixel.blue; if (scale_image->matte != MagickFalse) s->opacity=pixel.opacity; if (scale_indexes != (IndexPacket *) NULL) s->index=pixel.index; s++; y_vector[x]=zero; } scale.y-=span.y; if (scale.y <= 0) { scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } span.y=1.0; } if (scale_image->columns == image->columns) { /* Transfer scanline to scaled image. */ s=scanline; for (x=0; x < (long) scale_image->columns; x++) { q->red=RoundToQuantum(s->red); q->green=RoundToQuantum(s->green); q->blue=RoundToQuantum(s->blue); if (scale_image->matte != MagickFalse) q->opacity=RoundToQuantum(s->opacity); if (scale_indexes != (IndexPacket *) NULL) scale_indexes[x]=(IndexPacket) RoundToQuantum(s->index); q++; s++; } } else { /* Scale X direction. 
*/ pixel=zero; next_column=MagickFalse; span.x=1.0; s=scanline; t=scale_scanline; for (x=0; x < (long) image->columns; x++) { scale.x=(double) scale_image->columns/(double) image->columns; while (scale.x >= span.x) { if (next_column != MagickFalse) { pixel=zero; t++; } pixel.red+=span.x*s->red; pixel.green+=span.x*s->green; pixel.blue+=span.x*s->blue; if (image->matte != MagickFalse) pixel.opacity+=span.x*s->opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index+=span.x*s->index; t->red=pixel.red; t->green=pixel.green; t->blue=pixel.blue; if (scale_image->matte != MagickFalse) t->opacity=pixel.opacity; if (scale_indexes != (IndexPacket *) NULL) t->index=pixel.index; scale.x-=span.x; span.x=1.0; next_column=MagickTrue; } if (scale.x > 0) { if (next_column != MagickFalse) { pixel=zero; next_column=MagickFalse; t++; } pixel.red+=scale.x*s->red; pixel.green+=scale.x*s->green; pixel.blue+=scale.x*s->blue; if (scale_image->matte != MagickFalse) pixel.opacity+=scale.x*s->opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index+=scale.x*s->index; span.x-=scale.x; } s++; } if (span.x > 0) { s--; pixel.red+=span.x*s->red; pixel.green+=span.x*s->green; pixel.blue+=span.x*s->blue; if (scale_image->matte != MagickFalse) pixel.opacity+=span.x*s->opacity; if (scale_indexes != (IndexPacket *) NULL) pixel.index+=span.x*s->index; } if ((next_column == MagickFalse) && ((long) (t-scale_scanline) < (long) scale_image->columns)) { t->red=pixel.red; t->green=pixel.green; t->blue=pixel.blue; if (scale_image->matte != MagickFalse) t->opacity=pixel.opacity; if (scale_indexes != (IndexPacket *) NULL) t->index=pixel.index; } /* Transfer scanline to scaled image. */ t=scale_scanline; for (x=0; x < (long) scale_image->columns; x++) { q->red=RoundToQuantum(t->red); q->green=RoundToQuantum(t->green); q->blue=RoundToQuantum(t->blue); if (scale_image->matte != MagickFalse) q->opacity=RoundToQuantum(t->opacity); if (scale_indexes != (IndexPacket *) NULL) scale_indexes[x]=(IndexPacket) RoundToQuantum(t->index); t++; q++; } } if (SyncImagePixels(scale_image) == MagickFalse) break; if ((image->progress_monitor != (MagickProgressMonitor) NULL) && (QuantumTick(y,image->rows) != MagickFalse)) { status=image->progress_monitor(ScaleImageTag,y,image->rows, image->client_data); if (status == MagickFalse) break; } } /* Free allocated memory. */ y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector); scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline); if (scale_image->rows != image->rows) scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline); x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector); return(scale_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h u m b n a i l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThumbnailImage() changes the size of an image to the given dimensions and % removes any associated profiles. The goal is to produce small low cost % thumbnail images suited for display on the Web. % % The format of the ThumbnailImage method is: % % Image *ThumbnailImage(const Image *image,const unsigned long columns, % const unsigned long rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o columns: The number of columns in the scaled image. % % o rows: The number of rows in the scaled image. % % o exception: Return any errors or warnings in this structure. 
% */ MagickExport Image *ThumbnailImage(const Image *image, const unsigned long columns,const unsigned long rows,ExceptionInfo *exception) { char value[MaxTextExtent]; const char *attribute; Image *sample_image, *thumbnail_image; MagickRealType x_factor, y_factor; struct stat attributes; unsigned long version; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); x_factor=(MagickRealType) columns/(MagickRealType) image->columns; y_factor=(MagickRealType) rows/(MagickRealType) image->rows; if ((x_factor*y_factor) > 0.1) { thumbnail_image=ZoomImage(image,columns,rows,exception); if (thumbnail_image != (Image *) NULL) (void) StripImage(thumbnail_image); return(thumbnail_image); } sample_image=SampleImage(image,5*columns,5*rows,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); thumbnail_image=ZoomImage(sample_image,columns,rows,exception); sample_image=DestroyImage(sample_image); if (thumbnail_image == (Image *) NULL) return(thumbnail_image); if (thumbnail_image->matte == MagickFalse) (void) SetImageOpacity(thumbnail_image,OpaqueOpacity); thumbnail_image->depth=8; thumbnail_image->interlace=NoInterlace; (void) StripImage(thumbnail_image); (void) CopyMagickString(value,image->magick_filename,MaxTextExtent); if (strstr(image->magick_filename,"///") == (char *) NULL) (void) FormatMagickString(value,MaxTextExtent,"file:///%s", image->magick_filename); (void) SetImageProperty(thumbnail_image,"Thumb::URI",value); if (stat(image->filename,&attributes) == 0) { (void) FormatMagickString(value,MaxTextExtent,"%ld",attributes.st_mtime); (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value); } (void) FormatMagickSize(GetBlobSize(image),value); (void) SetImageProperty(thumbnail_image,"Thumb::Size",value); (void) FormatMagickString(value,MaxTextExtent,"image/%s",image->magick); LocaleLower(value); (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value); attribute=GetImageProperty(image,"comment"); if (attribute != (const char *) NULL) (void) SetImageProperty(thumbnail_image,"Description",attribute); (void) SetImageProperty(thumbnail_image,"Software", GetMagickVersion(&version)); (void) FormatMagickString(value,MaxTextExtent,"%lu",image->magick_columns); (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value); (void) FormatMagickString(value,MaxTextExtent,"%lu",image->magick_rows); (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value); (void) FormatMagickString(value,MaxTextExtent,"%lu", GetImageListLength(image)); (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value); return(thumbnail_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Z o o m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ZoomImage() creates a new image that is a scaled size of an existing one. % It allocates the memory necessary for the new Image structure and returns a % pointer to the new image. The Point filter gives fast pixel replication, % Triangle is equivalent to bi-linear interpolation, and Mitchell gives slower, % very high-quality results.
See Graphics Gems III for details on this % algorithm. % % The filter member of the Image structure specifies which image filter to % use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp. % % The format of the ZoomImage method is: % % Image *ZoomImage(const Image *image,const unsigned long columns, % const unsigned long rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o zoom_image: Method ZoomImage returns a pointer to the image after % scaling. A null image is returned if there is a memory shortage. % % o image: The image. % % o columns: An integer that specifies the number of columns in the zoom % image. % % o rows: An integer that specifies the number of rows in the scaled % image. % % o exception: Return any errors or warnings in this structure. % */ MagickExport Image *ZoomImage(const Image *image,const unsigned long columns, const unsigned long rows,ExceptionInfo *exception) { Image *zoom_image; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); zoom_image=ResizeImage(image,columns,rows,image->filter,image->blur, exception); return(zoom_image); }
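A minimal sketch of driving these resize entry points from a program; the header name and the initialization/I-O calls (MagickCoreGenesis, ReadImage, WriteImage) are assumptions from an ImageMagick 6-era core API, not part of the file above.

/* thumbnail_demo.c -- illustrative only; assumes an IM6-era MagickCore API. */
#include <string.h>
#include <magick/MagickCore.h>

int main(void)
{
  ExceptionInfo exception;
  ImageInfo *info;
  Image *image, *thumb;

  MagickCoreGenesis(".",MagickFalse);
  GetExceptionInfo(&exception);
  info=CloneImageInfo((ImageInfo *) NULL);
  (void) strcpy(info->filename,"input.png");
  image=ReadImage(info,&exception);
  if (image != (Image *) NULL)
    {
      /* For shrink factors where x_factor*y_factor <= 0.1, ThumbnailImage
         pre-shrinks with SampleImage before the final ZoomImage pass. */
      thumb=ThumbnailImage(image,128,128,&exception);
      if (thumb != (Image *) NULL)
        {
          (void) strcpy(thumb->filename,"thumb.png");
          (void) WriteImage(info,thumb);
          thumb=DestroyImage(thumb);
        }
      image=DestroyImage(image);
    }
  info=DestroyImageInfo(info);
  MagickCoreTerminus();
  return(0);
}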
nr_numint.c
/* Copyright 2014-2020 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <assert.h> #include "config.h" #include "gto/grid_ao_drv.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" #define BOXSIZE 56 int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice, int *ao_loc) { if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) { return 0; } const int sh0 = shls_slice[0]; const int sh1 = shls_slice[1]; int bas_id; int box_id = 0; int bound = BOXSIZE; int has0 = 0; empty[box_id] = 1; for (bas_id = sh0; bas_id < sh1; bas_id++) { empty[box_id] &= !non0table[bas_id]; if (ao_loc[bas_id] == bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = 1; } else if (ao_loc[bas_id] > bound) { has0 |= empty[box_id]; box_id++; bound += BOXSIZE; empty[box_id] = !non0table[bas_id]; } } return has0; } static void dot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int bgrids, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; double beta = 0; if (has0) { int box_id, blen, i, j; size_t b0; for (box_id = 0; box_id < nbox; box_id++) { if (!empty[box_id]) { b0 = box_id * BOXSIZE; blen = MIN(nao-b0, BOXSIZE); dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen, &D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc, &beta, vm, &ngrids); beta = 1.0; } } if (beta == 0) { // all empty for (i = 0; i < nocc; i++) { for (j = 0; j < bgrids; j++) { vm[i*ngrids+j] = 0; } } } } else { dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao, &D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids); } } /* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */ void VXCdot_ao_dm(double *vm, double *ao, double *dm, int nao, int nocc, int ngrids, int nbas, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; #pragma omp parallel { int ip, ib; #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_dm(vm+ip, ao+ip, dm, nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE), non0table+ib*nbas, shls_slice, ao_loc); } } } /* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids] */ static void dot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int bgrids, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { int nbox = (nao+BOXSIZE-1) / BOXSIZE; char empty[nbox]; int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc); const char TRANS_T = 'T'; const char TRANS_N = 'N'; const double D1 = 1; if (has0) { int ib, jb, leni, lenj; int j1 = nbox; size_t b0i, b0j; for (ib = 0; ib < nbox; ib++) { if (!empty[ib]) { b0i = ib * BOXSIZE; leni = MIN(nao-b0i, BOXSIZE); if (hermi) { j1 = ib + 1; } for (jb = 0; jb < j1; jb++) { if (!empty[jb]) { b0j = jb * BOXSIZE; lenj = MIN(nao-b0j, BOXSIZE); 
dgemm_(&TRANS_T, &TRANS_N, &lenj, &leni, &bgrids, &D1, ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids, &D1, vv+b0i*nao+b0j, &nao); } } } } } else { dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids, &D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao); } } /* vv[nao,nao] = ao1[i,nao] * ao2[i,nao] */ void VXCdot_ao_ao(double *vv, double *ao1, double *ao2, int nao, int ngrids, int nbas, int hermi, unsigned char *non0table, int *shls_slice, int *ao_loc) { const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE; memset(vv, 0, sizeof(double) * nao * nao); #pragma omp parallel { int ip, ib; double *v_priv = calloc(nao*nao+2, sizeof(double)); #pragma omp for nowait schedule(static) for (ib = 0; ib < nblk; ib++) { ip = ib * BLKSIZE; dot_ao_ao(v_priv, ao1+ip, ao2+ip, nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi, non0table+ib*nbas, shls_slice, ao_loc); } #pragma omp critical { for (ip = 0; ip < nao*nao; ip++) { vv[ip] += v_priv[ip]; } } free(v_priv); } if (hermi != 0) { NPdsymm_triu(nao, vv, hermi); } } // 'nip,np->ip' void VXC_dscale_ao(double *aow, double *ao, double *wv, int comp, int nao, int ngrids) { #pragma omp parallel { size_t Ngrids = ngrids; size_t ao_size = nao * Ngrids; int i, j, ic; double *pao = ao; #pragma omp for schedule(static) for (i = 0; i < nao; i++) { pao = ao + i * Ngrids; for (j = 0; j < Ngrids; j++) { aow[i*Ngrids+j] = pao[j] * wv[j]; } for (ic = 1; ic < comp; ic++) { for (j = 0; j < Ngrids; j++) { aow[i*Ngrids+j] += pao[ic*ao_size+j] * wv[ic*Ngrids+j]; } } } } } // 'ip,ip->p' void VXC_dcontract_rho(double *rho, double *bra, double *ket, int nao, int ngrids) { #pragma omp parallel { size_t Ngrids = ngrids; int nthread = omp_get_num_threads(); int blksize = MAX((Ngrids+nthread-1) / nthread, 1); int ib, b0, b1, i, j; #pragma omp for for (ib = 0; ib < nthread; ib++) { b0 = ib * blksize; b1 = MIN(b0 + blksize, ngrids); for (j = b0; j < b1; j++) { rho[j] = bra[j] * ket[j]; } for (i = 1; i < nao; i++) { for (j = b0; j < b1; j++) { rho[j] += bra[i*Ngrids+j] * ket[i*Ngrids+j]; } } } } } void VXC_vv10nlc(double *Fvec, double *Uvec, double *Wvec, double *vvcoords, double *coords, double *W0p, double *W0, double *K, double *Kp, double *RpW, int vvngrids, int ngrids) { #pragma omp parallel { double DX, DY, DZ, R2; double gp, g, gt, T, F, U, W; int i, j; #pragma omp for schedule(static) for (i = 0; i < ngrids; i++) { F = 0; U = 0; W = 0; for (j = 0; j < vvngrids; j++) { DX = vvcoords[j*3+0] - coords[i*3+0]; DY = vvcoords[j*3+1] - coords[i*3+1]; DZ = vvcoords[j*3+2] - coords[i*3+2]; R2 = DX*DX + DY*DY + DZ*DZ; gp = R2*W0p[j] + Kp[j]; g = R2*W0[i] + K[i]; gt = g + gp; T = RpW[j] / (g*gp*gt); F += T; T *= 1./g + 1./gt; U += T; W += T * R2; } Fvec[i] = F * -1.5; Uvec[i] = U; Wvec[i] = W; } } }
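For readers checking the blocked BLAS path above, here is an unblocked reference of the contraction that dot_ao_dm/VXCdot_ao_dm perform, vm[i,g] = sum_k ao[k,g]*dm[k,i]; this sketch is an assumption-free restatement of the dgemm_ call's layouts, deliberately omits the non0table screening, and is intended only for correctness testing.

/* Naive reference for VXCdot_ao_dm: ao is laid out nao x ngrids,
 * dm is nao x nocc, vm is nocc x ngrids.  Illustrative only. */
#include <stddef.h>

static void dot_ao_dm_ref(double *vm, const double *ao, const double *dm,
                          int nao, int nocc, int ngrids)
{
        size_t Ngrids = ngrids;
        int i, k;
        size_t g;
        for (i = 0; i < nocc; i++) {
        for (g = 0; g < Ngrids; g++) {
                double s = 0;
                for (k = 0; k < nao; k++) {
                        s += ao[k*Ngrids+g] * dm[(size_t)k*nocc+i];
                }
                vm[i*Ngrids+g] = s;
        } }
}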
begin_declare_variant_range_withouth_end.c
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s // TODO: Issue an error message as the end is missing // expected-no-diagnostics #pragma omp begin declare variant match(device={kind(cpu)}) int also_before(void) { return 0; } #pragma omp begin declare variant match(device={kind(gpu)}) int also_after(void) { return 2; } int also_before(void) { return 2; } #pragma omp end declare variant #pragma omp begin declare variant match(device={kind(fpga)}) This text is never parsed! #pragma omp end declare variant int also_after(void) { return 0; } int test() { return also_after() + also_before(); }
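For contrast, a sketch of the well-formed pairing the TODO above alludes to: every begin directive must be closed by a matching end directive. The function name here is illustrative, not part of the test.

// Illustrative only: a correctly terminated `begin declare variant` range.
// The definition inside the range becomes the cpu-device variant of the
// base function defined after it.
#pragma omp begin declare variant match(device={kind(cpu)})
int properly_closed(void) { return 1; }
#pragma omp end declare variant

int properly_closed(void) { return 0; }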
layer.h
#ifndef __LAYER_H__ #define __LAYER_H__ #ifndef __SYNTHESIS__ #include <iostream> #include <iomanip> #include <string> #include <cassert> #include <cmath> #include <limits> template <typename T, int C, int H, int W> inline void Debug_print( T in[C*H*W], const std::string op ){ std::cout << std::fixed; std::cout << op << "\n"; for(int i=0; i<C; i++){ std::cout << "Channel " << i << "\n"; for(int j=0; j<H; j++){ for(int k=0; k<W; k++){ //std::cout << std::setw(8) << std::setprecision(6) << in[i*H*W + j*W + k] << ","; std::cout << std::setw(8) << std::setprecision(6) << in[j*W*C + k*C + i] << ","; } std::cout << "\n"; } } std::cout << std::flush; } #endif #include <hls_stream.h> template <typename T, int L> class PIPE{ private: T buf[L]; int idx; public: PIPE() : idx(0){for(int i=0; i<L; i++) this->buf[i]=0;} T push_and_pop(T val){ #pragma HLS inline region int oldest = (idx + 1) < L ? (idx + 1) : 0; T ret = buf[oldest]; buf[oldest] = val; idx = (idx < L-1) ? idx+1 : 0; return ret; } T* getPipe(void){ return this->buf; } }; template <typename T, int H, int W, int K, int TShiftReg> class ConvShiftReg{ private: T ShiftReg[K*K]; PIPE<T, W-K> pipes[K-1]; T LUTShiftReg[(K-1)*W+K]; public: ConvShiftReg(){for(int i=0; i<K*K; i++) this->ShiftReg[i] = 0;} void copyShiftReg(T* array){ if constexpr(TShiftReg == 0){ for(int i=0; i<K*K; i++){ #pragma HLS UNROLL array[i] = ShiftReg[K*K-1-i]; } }else{ for(int i=0; i<K; i++){ #pragma HLS UNROLL for(int j=0; j<K; j++){ array[i*K+j] = LUTShiftReg[(K-1)*W+K-1-(i*W+j)]; } } } } void push(T val){ if constexpr(TShiftReg == 0){ #pragma HLS ARRAY_PARTITION variable=ShiftReg complete dim=1 #pragma HLS inline region T din = val; for(int i=0; i<K; i++){ #pragma HLS UNROLL T tmp = ShiftReg[i*K+K-1]; for(int j=K-2; j>=0; j--){ #pragma HLS UNROLL ShiftReg[i*K + j+1] = ShiftReg[i*K + j]; } ShiftReg[i*K] = din; if(i < K-1) din = pipes[i].push_and_pop(tmp); } }else{ #pragma HLS ARRAY_PARTITION variable=LUTShiftReg complete dim=1 #pragma HLS inline region for(int i=(K-1)*W+K-2; i>=0; i--){ #pragma HLS UNROLL LUTShiftReg[i+1] = LUTShiftReg[i]; } LUTShiftReg[0] = val; } } }; // 222195 // template <typename T, int L> // T sum(T* array) // { // if constexpr(L==1){ // return array[0]; // } // else if constexpr(L%2 == 0){ // return sum<T, L/2>(&array[0]) + sum<T, L/2>(&array[L/2]); // } // else{ // return sum<T, L/2>(&array[0]) + sum<T, L/2>(&array[L/2]) + array[L-1]; // } // } // 189667 template <typename T, int L> T sum(T* array) { T ret = 0; for(int i=0; i<L; i++) ret += array[i]; return ret; } template <typename TA, typename TB, typename TR, int L> TR inner_product(const TA* a, const TB* b) { TR ret = 0; for(int i=0; i<L; i++){ ret += a[i] * b[i]; } return ret; } // slow // template <typename TA, typename TB, typename TR, int L> // TR inner_product(const TA* a, const TB* b) // { // if constexpr(L==1){ // return a[0] * b[0]; // } // else if constexpr(L%2 == 0){ // return inner_product<TA, TB, TR, L/2>(&a[0], &b[0]) + inner_product<TA, TB, TR, L/2>(&a[L/2], &b[L/2]); // } // else{ // return inner_product<TA, TB, TR, L/2>(&a[0], &b[0]) + inner_product<TA, TB, TR, L/2>(&a[L/2], &b[L/2]) + a[L-1]*b[L-1]; // } // } template <typename IT, typename OT, typename WT, typename BT, int IC, int IH, int IW, int OC, int OH, int OW, int K, int S, int P> inline void Conv( hls::stream<IT> &in, hls::stream<OT> &out, const WT weight[OC*IC*K*K], const BT bias[OC] ){ //#pragma HLS DATAFLOW ConvShiftReg<IT, IH+2*P, IW+2*P, K, 1> shift_reg[IC]; IT mat_buf[IC*K*K]; OT gemm_buf[IC*OC]; //OT debug_buf[OC*OH*OW]; for(int h=-P; h<IH+P; h++){
for(int w=-P; w<IW+P; w++){ int i = h - (K/2)*2 + P; int j = w - (K/2)*2 + P; for(int ic=0; ic<IC; ic++){ IT in_tmp = 0; if(0<=h && h<IH && 0<=w && w<IW) in_tmp = in.read(); shift_reg[ic].push(in_tmp); shift_reg[ic].copyShiftReg(&mat_buf[ic*K*K]); for(int oc=0; oc<OC; oc++){ gemm_buf[oc*IC + ic] = inner_product<IT, WT, OT, K*K>(&mat_buf[ic*K*K], &weight[oc*IC*K*K + ic*K*K]); } } for(int oc=0; oc<OC; oc++){ OT tmp = sum<OT, IC>(&gemm_buf[oc*IC]) + bias[oc]; if(0 <= i && i < OH && 0 <= j && j < OW){ out.write(tmp); //debug_buf[i*OW*OC + j*OC + oc] = tmp; } } } } //Debug_print<OT, OC, OH, OW>(debug_buf, "Conv"); } template <typename IT, typename OT, typename WT, typename BT, int IC, int IH, int IW, int OC, int OH, int OW, int K, int S, int P> inline void Convolver( IT in[IC*IW*IH], OT out[OC*OW*OH], const WT weight[OC*IC*K*K], const BT bias[OC] ){ ConvShiftReg<IT, IH+2*P, IW+2*P, K, 0> shift_reg[IC]; IT mat_buf[IC*K*K]; OT gemm_buf[IC*OC]; for(int h=-P; h<IH+P; h++){ for(int w=-P; w<IW+P; w++){ int i = h - (K/2)*2 + P; int j = w - (K/2)*2 + P; for(int ic=0; ic<IC; ic++){ if(0<=h && h<IH && 0<=w && w<IW) shift_reg[ic].push(in[h*IW*IC + w*IC + ic]); else shift_reg[ic].push(0); if(0 <= i && i < OH && 0 <= j && j < OW){ shift_reg[ic].copyShiftReg(&mat_buf[ic*K*K]); for(int oc=0; oc<OC; oc++){ gemm_buf[oc*IC + ic] = inner_product<IT, WT, OT, K*K>(&mat_buf[ic*K*K], &weight[oc*IC*K*K + ic*K*K]); } } } for(int oc=0; oc<OC; oc++){ if(0 <= i && i < OH && 0 <= j && j < OW){ out[i*OW*OC + j*OC + oc] = sum<OT, IC>(&gemm_buf[oc*IC]); out[i*OW*OC + j*OC + oc] += bias[oc]; } } } } } template <typename IT, typename OT, typename WT, typename BT, int IC, int IH, int IW, int OC, int OH, int OW, int K, int S, int P> inline void Conv( IT in[IC*IW*IH], OT out[OC*OW*OH], const WT weight[OC*IC*K*K], const BT bias[OC] ) { #pragma omp parallel { const int G = 1; #pragma omp for for(int h=0; h<OH; h+=S){ for(int w=0; w<OW; w+=S){ for(int oc=0; oc<OC; oc++){ out[h*OW*OC + w*OC + oc] = bias[oc]; //for(int ic=(IC/G*oc)%OC; ic<(IC/G*(oc+1))%OC; ic++){ for(int ic=0; ic<IC; ic++){ // in channel for(int m=-K/2+(K+1)%2; m<=K/2; m++){ // kernel size for(int n=-K/2+(K+1)%2; n<=K/2; n++){ // kernel size int i = h+m+(IH-OH)/2; int j = w+n+(IW-OW)/2; if(i<0 || j<0 || i>=IH || j>=IW){ continue; } int k = m+K/2-(K+1)%2; int l = n+K/2-(K+1)%2; //out[h*OW*OC + w*OC + oc] += in[i*IW*IC + j*IC + ic] * weight[oc*IC*K*K + ic*K*K + k*K + l]; out[h*OW*OC + w*OC + oc] += in[i*IW*IC + j*IC + ic] * weight[oc*IC*K*K + (ic/G)*K*K + k*K + l]; } } } } } } } } template <typename IT, typename OT, typename WT, typename BT, int IC, int IH, int IW, int OC, int OH, int OW> inline void BatchNorm( IT in[IC*IW*IH], OT out[OC*OW*OH], const WT weight[OC], const BT bias[OC] ){ #pragma omp parallel { #pragma omp for for(int oc=0; oc<OC; oc++){ // out channel for(int w=0; w<OW; w++){ // input data width for(int h=0; h<OH; h++){ // input data height out[oc*OH*OW + w*OH + h] = (in[oc*OH*OW + w*OH + h] * weight[oc]) + bias[oc]; } } } } } template <typename IT, typename OT, int IC, int IH, int IW, int OC, int OH, int OW> inline void Relu( IT in [IC*IH*IW], OT out[OC*OH*OW] ){ #ifdef _OPENMP #pragma omp parallel #endif { #ifdef _OPENMP #pragma omp for #endif for(int c=0; c<IC; c++) // out channel for(int h=0; h<IH; h++) // input data width for(int w=0; w<IW; w++) // input data height if(in[c*IH*IW + h*IW + w] < 0) out[c*OH*OW + h*OW + w] = 0; else out[c*OH*OW + h*OW + w] = in[c*OH*OW + h*OW + w]; } } template <typename IT, typename OT, int IC, int IH, int IW, int OC, 
int OH, int OW> inline void Relu( hls::stream<IT> &in, hls::stream<OT> &out ){ //#pragma HLS DATAFLOW for(int c=0; c<IC; c++){ for(int h=0; h<IH; h++){ for(int w=0; w<IW; w++){ IT tmp = in.read(); if(tmp > 0) out.write(tmp); else out.write(0); } } } } template <typename IT, typename OT, int IC, int IH, int IW, int OC, int OH, int OW, int K, int S, int P> inline void MaxPool( hls::stream<IT> &in, hls::stream<OT> &out ){ //#pragma HLS DATAFLOW ConvShiftReg<IT, IH, IW, K, 1> shift_reg[IC]; IT buf[IC*K*K]; //OT debug_buf[OC*OH*OW]; for(int h=0; h<IH; h++){ for(int w=0; w<IW; w++){ for(int ic=0; ic<IC; ic++){ shift_reg[ic].push(in.read()); if(h >= OH*K || w >= OW*K) continue; if((h%K) == (K-1) && (w%K) == (K-1)){ shift_reg[ic].copyShiftReg(&buf[ic*K*K]); OT tmp = buf[ic*K*K]; for(int k=1; k<K*K; k++){ if(buf[ic*K*K + k] > tmp) tmp = buf[ic*K*K + k]; } out.write(tmp); //debug_buf[(h/K)*OW*OC + (w/K)*OC + ic] = tmp; } } } } //Debug_print<OT, OC, OH, OW>(debug_buf, "MaxPool"); } template <typename IT, typename OT, int IC, int IH, int IW, int OC, int OH, int OW, int K, int S, int P> inline void MaxPooler( IT in [IC*IH*IW], OT out[OC*OH*OW] ){ ConvShiftReg<IT, IH, IW, K, 1> shift_reg[IC]; IT buf[IC*K*K]; for(int h=0; h<IH; h++){ for(int w=0; w<IW; w++){ for(int ic=0; ic<IC; ic++){ shift_reg[ic].push(in[h*IW*IC + w*IC + ic]); if(h >= OH*K || w >= OW*K) continue; if((h%K) == (K-1) && (w%K) == (K-1)){ shift_reg[ic].copyShiftReg(&buf[ic*K*K]); OT tmp = buf[ic*K*K]; for(int k=1; k<K*K; k++){ if(buf[ic*K*K + k] > tmp) tmp = buf[ic*K*K + k]; } //out.write(tmp); out[(h/K)*OW*OC + (w/K)*OC + ic] = tmp; } } } } } template <typename IT, typename OT, int IC, int IH, int IW, int OC, int OH, int OW, int K, int S, int P> inline void MaxPool( IT in [IC*IH*IW], OT out[OC*OH*OW] ){ #pragma omp parallel { #pragma omp for for(int h=0; h<IH; h+=S){ // data width for(int w=0; w<IW; w+=S){ // data height for(int i=0; i<IC; i++){ for(int m=0; m<K; m++){ // pooling size for(int n=0; n<K; n++){ // pooling size if(h >= OH*K || w >= OW*K) continue; if(m==0 && n==0){ out[(h/K)*OW*OC + (w/K)*OC + i] = in[h*IW*IC + w*IC + i]; } else if(out[(h/K)*OW*OC + (w/K)*OC + i] < in[(h+m)*IW*IC + (w+n)*IC + i]){ out[(h/K)*OW*OC + (w/K)*OC + i] = in[(h+m)*IW*IC + (w+n)*IC + i]; } } } } } } } } template <typename IT, typename OT, int IC, int IH, int IW, int OC, int OH, int OW> inline void Flatten( hls::stream<IT> &in, hls::stream<OT> &out ){ //#pragma HLS DATAFLOW OT tmp[OC*OH*OW]; for(int h=0; h<IH; h++){ for(int w=0; w<IW; w++){ for(int c=0; c<IC; c++){ tmp[c*IH*IW + h*IW + w] = in.read(); } } } for(int i=0; i<IC*IH*IW; i++){ out.write(tmp[i]); } //Debug_print<OT, OC, OH, OW>(tmp, "Flatten"); } template <typename IT, typename OT, int IC, int IH, int IW, int OC, int OH, int OW> inline void Flatten( IT in [IC*IH*IW], OT out[OC*OH*OW] ){ #pragma omp parallel { #pragma omp for for(int h=0; h<IH; h++){ for(int w=0; w<IW; w++){ for(int c=0; c<IC; c++){ out[c*IH*IW + h*IW + w] = in[h*IW*IC + w*IC + c]; } } } } } template <typename T, int L> class CyclicSR{ private: T reg[L]; public: void set_top(T val){ reg[0] = val; } T get_top(void){ return reg[0]; } void shift(void){ #pragma HLS ARRAY_PARTITION variable=reg complete dim=1 #pragma HLS inline region T tmp = reg[0]; for(int i=0; i<L-1; i++){ #pragma HLS UNROLL reg[i] = reg[i+1]; } reg[L-1] = tmp; } }; template <typename IT, typename OT, typename WT, typename BT, int IC, int IH, int IW, int OC, int OH, int OW> inline void Gemm( hls::stream<IT> &in, hls::stream<OT> &out, const WT weight[IW*OW], 
const BT bias[OW] ) { // #pragma HLS allocation instances=mul limit=OW operation // #pragma HLS array_partition variable=weight block factor=OW dim=1 // //#pragma HLS array_partition variable=bias complete dim=0 // CyclicSR<OT, OW> sum_buf; // #pragma HLS array_partition variable=sum_buf complete dim=1 // for(int o=0; o<OW; o++){ // //sum_buf[o] = bias[o]; // sum_buf.set_top(bias[o]); // sum_buf.shift(); // } // IT shift_reg[OW]; // #pragma HLS array_partition variable=shift_reg complete dim=1 // for(int o=0; o<OW; o++){ // #pragma HLS UNROLL // shift_reg[o] = 0; // } // for(int i=0; i<IW+OW-1; i++){ // IT in_tmp; // if(i>=IW) in_tmp = 0; // else in_tmp = in.read(); // for(int o=OW-1; o>=1; o--){ // #pragma HLS UNROLL // shift_reg[o] = shift_reg[o-1]; // } // shift_reg[0] = in_tmp; // for(int o=0; o<OW; o++){ // //#pragma HLS UNROLL // unsigned int idx = i-o; // idx = idx >= IW ? IW-1 : idx; // //sum_buf[o] += shift_reg[o] * weight[o*IW + idx]; // sum_buf.set_top(sum_buf.get_top() + shift_reg[o] * weight[o*IW + idx]); // sum_buf.shift(); // } // } // OT sum_buf[OW]; // for(int i=-1; i<IW; i++){ // #pragma HLS PIPELINE II=OW // IT in_tmp; // if(i>=0) in_tmp = in.read(); // for(int o=0; o<OW; o++){ // OT fetched_data = sum_buf[o]; // if(i==-1){ // sum_buf[o] = bias[o]; // } // else{ // sum_buf[o] = fetched_data + in_tmp * weight[o*IW + i]; // } // } // } // for(int o=0; o<OW; o++) out.write(sum_buf[o]); IT in_buf[IW]; for(int i=0; i<IW; i++) in_buf[i] = in.read(); OT out_buf[OW]; for(int o=0; o<OW; o++){ out_buf[o] = inner_product<IT, WT, OT, IW>(in_buf, &weight[o*IW]); out_buf[o] += bias[o]; } for(int o=0; o<OW; o++) out.write(out_buf[o]); // for(int o=0; o<OW; o++){ // } // for(int o=0; o<OW; o++){ // //out.write(sum_buf[0]); // out.write(sum_buf.get_top()); // sum_buf.shift(); // // for(int i=0; i<OW-1; i++){ // // #pragma HLS UNROLL // // sum_buf[i] = sum_buf[i+1]; // // } // //#pragma HLS PIPELINE // } } template <typename IT, typename OT, typename WT, typename BT, int IC, int IH, int IW, int OC, int OH, int OW> inline void Gemm( IT in [IW], OT out[OW], const WT weight[IW*OW], const BT bias[OW] ){ #pragma omp parallel { #pragma omp for // for(int o=0; o<OW; o++){ // out[o] = inner_product<IT, WT, OT, IW>(in, &weight[o*IW]); // out[o] += bias[o]; // } for(int o=0; o<OW; o++){ out[o] = bias[o]; for(int i=0; i<IW; i++){ out[o] += in[i] * weight[o*IW + i]; } } } } template <typename IT, typename OT, int IC, int IH, int IW, int OC, int OH, int OW> inline void LogSoftmax( hls::stream<IT> &in, hls::stream<OT> &out ){ //#pragma HLS DATAFLOW IT max_val = in.read(); OT max_idx = 0; SM: for(int i = 1; i < IW; i++){ IT tmp = in.read(); if( max_val < tmp){ max_val = tmp; max_idx = i; } } out.write(max_idx); } template <typename IT, typename OT, int IC, int IH, int IW, int OC, int OH, int OW> inline void LogSoftmax( IT in[IW], OT out[1] ){ IT max_val = in[0]; OT max_idx = 0; SM: for(int i = 1; i < IW; i++){ if( max_val < in[i]){ max_val = in[i]; max_idx = i; } } out[0] = max_idx; } #endif
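A minimal sketch of chaining the array-based overloads in layer.h into a toy classifier. All shapes and names here are illustrative assumptions, and compiling still requires the Vitis HLS headers because layer.h includes <hls_stream.h> unconditionally. Note the array overloads use an HWC (channel-last) layout, which is why the Flatten step reorders to CHW.

// tiny_net.cpp -- illustrative wiring of layer.h's array overloads.
// Toy pipeline: 1x8x8 input -> 3x3 conv (4 ch, pad 1) -> ReLU ->
// 2x2 max-pool -> flatten (64) -> fully connected (10) -> argmax.
#include "layer.h"

void tiny_net(float in[1*8*8], int out[1],
              const float conv_w[4*1*3*3], const float conv_b[4],
              const float fc_w[10*64], const float fc_b[10])
{
    static float conv_out[4*8*8];
    static float relu_out[4*8*8];
    static float pool_out[4*4*4];
    static float flat[64];
    static float fc_out[10];

    Conv<float, float, float, float, 1, 8, 8, 4, 8, 8, 3, 1, 1>(in, conv_out, conv_w, conv_b);
    Relu<float, float, 4, 8, 8, 4, 8, 8>(conv_out, relu_out);
    MaxPool<float, float, 4, 8, 8, 4, 4, 4, 2, 2, 0>(relu_out, pool_out);
    Flatten<float, float, 4, 4, 4, 4, 4, 4>(pool_out, flat);
    Gemm<float, float, float, float, 1, 1, 64, 1, 1, 10>(flat, fc_out, fc_w, fc_b);
    LogSoftmax<float, int, 1, 1, 10, 1, 1, 1>(fc_out, out);  // writes the argmax index
}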
pr64824.c
/* PR c/64824 */ /* { dg-do run } */ int main () { long long a; long long b = 1LL; int c = 3; #pragma omp atomic capture a = b = c << b; if (b != 6LL || a != 6LL) __builtin_abort (); return 0; }
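In the single-statement form above, `a = b = c << b` is an atomic capture: b is atomically updated to `c << b` (the `x = expr binop x` update form) and the new value is captured into a. A sketch of the equivalent structured-block form of the construct, standalone and only for illustration:

/* Illustrative rewrite of the same capture using the structured-block
   form; b becomes 3 << 1 == 6 and a captures the updated value. */
int
main ()
{
  long long a;
  long long b = 1LL;
  int c = 3;
#pragma omp atomic capture
  {
    b = c << b; /* atomic update */
    a = b;      /* capture of the new value */
  }
  if (b != 6LL || a != 6LL)
    __builtin_abort ();
  return 0;
}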