clipperz_srp_fmt_plug.c
/* This software was repurposed by Dhiru Kholia (dhiru at openwall.com) * in 2012. * * This software was written by Jim Fougeron jfoug AT cox dot net * in 2012. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2012 Jim Fougeron * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. * * Format was busted, just like wow-srp. It ONLY was handling binary residue * if the residue was exactly 64 hex bytes long. Well for exponentation, it * does not have to be 64 bytes. It can be shorter. We also handle case where * a shorter result number is 0 Lpadded to an even 64 bytes. split() should * be added to canonize these hashes, since they are same hash with * multiple representations. * * This implements the SRP protocol, with Clipperz documented * implementation specifics. * * s = random salt value. * * v is the 'verifier' value (256 bit value). * * Clipperz's offline database has following relevant fields, * * <script>_clipperz_dump_data_ = { ... * * '2f2134e38b23534adfcd43c2f7223caf3a53a8db7ce800f1e918e8e0d06b8b7a': { * s: 'e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32', * v: 'e8be8c8d9c1d5dc79ecc7b15d1787d5b5dc22e815ddb0b37f6145ca667421f1f * version: '0.2', * ... * } * P algorithm: * h1 = hashlib.sha256(password + username).digest() * P = h2 = hashlib.sha256(h1).hexdigest() * * x algorithm: * x1 = hashlib.sha256(s + P).digest() * x = hashlib.sha256(x1).hexdigest() * * v algorithm: * v = Clipperz.Crypto.SRP.g().powerModule(new Clipperz.Crypto.BigInt(x,16),Clipperz.Crypto.SRP.n()); * n = 125617018995153554710546479714086468244499594888726646874671447258204721048803 * g = 2 */ #if FMT_EXTERNS_H extern struct fmt_main fmt_clipperz; #elif FMT_REGISTERS_H john_register_one(&fmt_clipperz); #else #if AC_BUILT /* need to know if HAVE_LIBGMP is set, for autoconfig build */ #include "autoconfig.h" #endif #include <string.h> #include "sha2.h" #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #ifdef HAVE_LIBGMP #if HAVE_GMP_GMP_H #include <gmp/gmp.h> #else #include <gmp.h> #endif #define EXP_STR " GMP-exp" #else #include <openssl/bn.h> #define EXP_STR " oSSL-exp" #endif #include "johnswap.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "Clipperz" #define FORMAT_NAME "SRP" #define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR EXP_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define CLIPPERZSIG "$clipperz$" #define CLIPPERZSIGLEN 10 #define PLAINTEXT_LENGTH 16 #define CIPHERTEXT_LENGTH 65 #define BINARY_SIZE 33 #define BINARY_ALIGN 4 #define FULL_BINARY_SIZE 33 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 1 #define USERNAMELEN 32 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 4 #define SZ 128 // salt is in hex (salt and salt2) static struct fmt_tests tests[] = { {CLIPPERZSIG"e8be8c8d9c1d5dc79ecc7b15d1787d5b5dc22e815ddb0b37f6145ca667421f1f$e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32*hackme@mailinator.com", "openwall"}, {"$clipperz$05b18d6976d6cefad7c0c330c0c8a32ed69f19a8d68a94c3916c5ad1ba5ce37e5$RoljkWQajmS8OXFbsnqmZFTeB2How6hkoDd5QKu0DjthET3NmjTmOLumZe84nb7o*1", "password"}, 
{"$clipperz$5b18d6976d6cefad7c0c330c0c8a32ed69f19a8d68a94c3916c5ad1ba5ce37e5$RoljkWQajmS8OXFbsnqmZFTeB2How6hkoDd5QKu0DjthET3NmjTmOLumZe84nb7o*1", "password"}, {NULL} }; #ifdef HAVE_LIBGMP typedef struct t_SRP_CTX { mpz_t z_mod, z_base, z_exp, z_rop; } SRP_CTX; #else typedef struct t_SRP_CTX { BIGNUM *z_mod, *z_base, *z_exp, *z_rop; BN_CTX *BN_ctx; }SRP_CTX; #endif static SRP_CTX *pSRP_CTX; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; // BN_bn2bin sometimes tries to write 33 bytes, hence allow some padding! // that is because these are mod 0x115B8B692E0E045692CF280B436735C77A5A9E8A9E7ED56C965F87DB5B2A2ECE3 // which is a 65 hex digit number (33 bytes long). static ARCH_WORD_32 (*crypt_out)[(FULL_BINARY_SIZE/4) + 1]; static struct custom_salt { unsigned char saved_salt[SZ]; unsigned char user_id[SZ]; } *cur_salt; #ifdef HAVE_LIBGMP static int max_keys_per_crypt; #endif static void init(struct fmt_main *self) { int i; #if defined (_OPENMP) int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); pSRP_CTX = mem_calloc_align(sizeof(*pSRP_CTX), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); #ifdef HAVE_LIBGMP max_keys_per_crypt = self->params.max_keys_per_crypt; #endif for (i = 0; i < self->params.max_keys_per_crypt; ++i) { #ifdef HAVE_LIBGMP mpz_init_set_str(pSRP_CTX[i].z_mod, "125617018995153554710546479714086468244499594888726646874671447258204721048803", 10); mpz_init_set_str(pSRP_CTX[i].z_base, "2", 10); mpz_init_set_str(pSRP_CTX[i].z_exp, "1", 10); mpz_init(pSRP_CTX[i].z_rop); // Now, properly initialized mpz_exp, so it is 'large enough' to hold any SHA256 value // we need to put into it. Then we simply need to copy in the data, and possibly set // the limb count size. 
mpz_mul_2exp(pSRP_CTX[i].z_exp, pSRP_CTX[i].z_exp, 159); #else pSRP_CTX[i].z_mod=BN_new(); BN_dec2bn(&pSRP_CTX[i].z_mod, "125617018995153554710546479714086468244499594888726646874671447258204721048803"); pSRP_CTX[i].z_base=BN_new(); BN_set_word(pSRP_CTX[i].z_base, 2); pSRP_CTX[i].z_exp=BN_new(); pSRP_CTX[i].z_rop=BN_new(); pSRP_CTX[i].BN_ctx = BN_CTX_new(); #endif } } void done(void) { #ifdef HAVE_LIBGMP int i; for (i = 0; i < max_keys_per_crypt; ++i) { mpz_clear(pSRP_CTX[i].z_mod); mpz_clear(pSRP_CTX[i].z_base); mpz_clear(pSRP_CTX[i].z_exp); mpz_clear(pSRP_CTX[i].z_rop); } #endif MEM_FREE(pSRP_CTX); MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p = NULL; if (strncmp(ciphertext, CLIPPERZSIG, CLIPPERZSIGLEN)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += CLIPPERZSIGLEN; if ((p = strtokm(ctcopy, "$")) == NULL) goto err; if (strlen(p) > CIPHERTEXT_LENGTH) goto err; if (!ishex_oddOK(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) goto err; if (strlen(p) > SZ-1) goto err; if ((p = strtokm(NULL, "*")) == NULL) goto err; if (strlen(p) > SZ-1) goto err; if ((p = strtokm(NULL, "*"))) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static char *split(char *ciphertext, int index, struct fmt_main *pFmt) { static char ct[128+2*SZ+1]; char *cp; if (strncmp(ciphertext, CLIPPERZSIG, CLIPPERZSIGLEN)) return ciphertext; strnzcpy(ct, ciphertext, sizeof(ct)); cp = strchr(&ct[CLIPPERZSIGLEN], '$'); if (!cp) return ciphertext; *cp = 0; strlwr(&ct[CLIPPERZSIGLEN]); *cp = '$'; if (ct[CLIPPERZSIGLEN] == '0') { char *cpi = &ct[CLIPPERZSIGLEN]; char *cpo = cpi; while (*cpi == '0') ++cpi; do { *cpo++ = *cpi; } while (*cpi++); } return ct; } static void *get_binary(char *ciphertext) { static union { unsigned char c[FULL_BINARY_SIZE]; ARCH_WORD_32 dummy[1]; } buf; unsigned char *out = buf.c; char *p, *q; int i; p = &ciphertext[CLIPPERZSIGLEN]; q = strchr(p, '$'); memset(buf.c, 0, sizeof(buf)); while (*p == '0') ++p; if ((q-p)&1) { out[0] = atoi16[ARCH_INDEX(*p)]; ++p; } else { out[0] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } for (i = 1; i < FULL_BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; if (p >= q) break; } return out; } static void *get_salt(char *ciphertext) { char *p; char *q; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); p = ciphertext; p = strchr(&ciphertext[CLIPPERZSIGLEN], '$') + 1; q = strrchr(ciphertext, '*'); strncpy((char*)cs.saved_salt, p, q - p); p = strrchr(ciphertext, '*') + 1; strcpy((char*)cs.user_id, p); return (void *)&cs; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int salt_hash(void *salt) { unsigned int hash = 0; char *p = (char *)salt; while (*p) { hash <<= 1; hash += (unsigned char)*p++; if (hash >> SALT_HASH_LOG) { hash ^= hash >> SALT_HASH_LOG; hash &= (SALT_HASH_SIZE - 1); } } hash ^= hash >> SALT_HASH_LOG; hash &= (SALT_HASH_SIZE - 1); return hash; } static void 
set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void set_key(char *key, int index) { strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1); } static char *get_key(int index) { return saved_key[index]; } static inline void hex_encode(unsigned char *str, int len, unsigned char *out) { int i; for (i = 0; i < len; ++i) { out[0] = itoa16[str[i]>>4]; out[1] = itoa16[str[i]&0xF]; out += 2; } } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int j; #ifdef _OPENMP #pragma omp parallel for #endif for (j = 0; j < count; ++j) { SHA256_CTX ctx; unsigned char Tmp[32]; unsigned char TmpHex[64]; memset(crypt_out[j], 0, sizeof(crypt_out[j])); SHA256_Init(&ctx); SHA256_Update(&ctx, saved_key[j], strlen(saved_key[j])); SHA256_Update(&ctx, cur_salt->user_id, strlen((char*)cur_salt->user_id)); SHA256_Final(Tmp, &ctx); SHA256_Init(&ctx); SHA256_Update(&ctx, Tmp, 32); SHA256_Final(Tmp, &ctx); SHA256_Init(&ctx); SHA256_Update(&ctx, cur_salt->saved_salt, strlen((char*)cur_salt->saved_salt)); hex_encode(Tmp, 32, TmpHex); SHA256_Update(&ctx, TmpHex, 64); SHA256_Final(Tmp, &ctx); SHA256_Init(&ctx); SHA256_Update(&ctx, Tmp, 32); SHA256_Final(Tmp, &ctx); #ifdef HAVE_LIBGMP { unsigned char HashStr[80], *p; int i, todo; p = HashStr; for (i = 0; i < 32; ++i) { *p++ = itoa16[Tmp[i]>>4]; *p++ = itoa16[Tmp[i]&0xF]; } *p = 0; mpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16); mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod ); mpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop); p = HashStr; todo = strlen((char*)p); if (todo&1) { ((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)]; ++p; --todo; } else { ((unsigned char*)(crypt_out[j]))[0] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; todo -= 2; } todo >>= 1; for (i = 1; i <= todo; i++) { ((unsigned char*)(crypt_out[j]))[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } } #else // using oSSL's BN to do expmod. pSRP_CTX[j].z_exp = BN_bin2bn(Tmp,32,pSRP_CTX[j].z_exp); BN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx); BN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j])); #endif } return count; } static int cmp_all(void *binary, int count) { int i; for (i = 0; i < count; ++i) { if (*((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[i]))) return 1; } return 0; } static int cmp_one(void *binary, int index) { return *((ARCH_WORD_32*)binary) == *((ARCH_WORD_32*)(crypt_out[index])); } static int cmp_exact(char *source, int index) { return !memcmp(get_binary(source), crypt_out[index], BINARY_SIZE); } struct fmt_main fmt_clipperz = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
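The header comment above spells out Clipperz's derivation: P = hex(SHA256(SHA256(password + username))), x = hex(SHA256(SHA256(salt_hex + P))), then v = 2^x mod n. A minimal standalone sketch of the P/x steps, assuming OpenSSL's one-shot SHA256() is available and reusing the first entry of the tests[] array; the modular exponentiation for v would follow with GMP or OpenSSL BN exactly as crypt_all() does.

// Minimal sketch (assumes OpenSSL libcrypto) of the documented derivation:
//   P = hex(SHA256(SHA256(password + username)))
//   x = hex(SHA256(SHA256(salt_hex + P)))
#include <openssl/sha.h>
#include <cstdio>
#include <string>

static std::string sha256_raw(const std::string &in) {
    unsigned char d[SHA256_DIGEST_LENGTH];
    SHA256(reinterpret_cast<const unsigned char *>(in.data()), in.size(), d);
    return std::string(reinterpret_cast<char *>(d), SHA256_DIGEST_LENGTH);
}

static std::string to_hex(const std::string &raw) {
    static const char *digits = "0123456789abcdef";
    std::string out;
    for (unsigned char c : raw) {
        out += digits[c >> 4];
        out += digits[c & 0xF];
    }
    return out;
}

int main() {
    // First test vector from the tests[] array above.
    std::string password = "openwall";
    std::string username = "hackme@mailinator.com";
    std::string salt_hex = "e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32";
    std::string P = to_hex(sha256_raw(sha256_raw(password + username)));
    std::string x = to_hex(sha256_raw(sha256_raw(salt_hex + P)));
    std::printf("P = %s\nx = %s\n", P.c_str(), x.c_str());
    // v = 2^x mod n would follow via mpz_powm() / BN_mod_exp() as in crypt_all().
    return 0;
}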
bslevd2.c
#include "laev2.h" #include "wnrme.h" #include "rnd.h" #include "timer.h" int main(int argc, char *argv[]) { (void)set_cbwr(); if (4 != argc) { (void)fprintf(stderr, "%s filename 2^{batch_size} #batches\n", *argv); return EXIT_FAILURE; } const size_t n = ((size_t)1u << atoz(argv[2u])); int th = 0; #ifdef _OPENMP th = omp_get_max_threads(); if (n % th) { (void)fprintf(stderr, "batch_size has to be a multiple of %d.\n", th); return EXIT_FAILURE; } #endif /* _OPENMP */ const size_t b = atoz(argv[3u]); if (!b) return EXIT_SUCCESS; const size_t nl = strlen(argv[1u]), nl1 = (nl + 1u); char *const fn = calloc((nl + 3u), sizeof(char)); assert(fn); strcpy(fn, argv[1u])[nl] = '.'; int fm = O_RDONLY; #ifdef _LARGEFILE64_SOURCE fm |= O_LARGEFILE; #endif /* _LARGEFILE64_SOURCE */ fn[nl1] = 'k'; const int fk = open(fn, fm); if (-1 >= fk) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'l'; const int fl = open(fn, fm); if (-1 >= fl) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'f'; const int ff = open(fn, fm); if (-1 >= ff) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'g'; const int fg = open(fn, fm); if (-1 >= fg) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } fn[nl1] = 'h'; const int fh = open(fn, fm); if (-1 >= fh) { (void)fprintf(stderr, "Cannot open %s for reading!\n", fn); return EXIT_FAILURE; } const size_t nt = n * sizeof(float); float *const a11 = (float*)aligned_alloc(sizeof(float), nt), *const a22 = (float*)aligned_alloc(sizeof(float), nt), *const a21 = (float*)aligned_alloc(sizeof(float), nt), *const cs1 = (float*)aligned_alloc(sizeof(float), nt), *const sn1 = (float*)aligned_alloc(sizeof(float), nt), *const l1 = (float*)aligned_alloc(sizeof(float), nt), *const l2 = (float*)aligned_alloc(sizeof(float), nt); assert(a11); assert(a22); assert(a21); assert(cs1); assert(sn1); assert(l1); assert(l2); unsigned rd[2u] = { 0u, 0u }; const uint64_t hz = tsc_get_freq_hz_(rd); (void)fprintf(stderr, "TSC frequency: %llu+(%u/%u) Hz.\n", (unsigned long long)hz, rd[0u], rd[1u]); (void)fflush(stderr); (void)fprintf(stdout, "\"B\",\"Ts\",\"ORT\",\"REN\",\"RLN\",\"RLX\",\"RLM\"\n"); (void)fflush(stdout); const char *bf = (const char*)NULL; if (b <= 10u) bf = "%1zu"; else if (b <= 100u) bf = "%2zu"; else if (b <= 1000u) bf = "%3zu"; else // b > 1000 bf = "%zu"; const size_t n_t = n / imax(th, 1); const size_t cnt = n_t * sizeof(float); char a[31u] = { '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0','\0' }; for (size_t j = 0u; j < b; ++j) { (void)fprintf(stdout, bf, j); (void)fflush(stdout); const size_t jn = j * n; #ifdef _OPENMP #pragma omp parallel default(none) shared(ff,fg,fh,a11,a22,a21,n,n_t,cnt,jn) #endif /* _OPENMP */ { const int mt = #ifdef _OPENMP omp_get_thread_num() #else /* !_OPENMP */ 0 #endif /* ?_OPENMP */ ; const size_t tnt = mt * n_t; const off_t off = (jn + tnt) * sizeof(float); if ((ssize_t)cnt != pread(ff, (a11 + tnt), cnt, off)) exit(EXIT_FAILURE); if ((ssize_t)cnt != pread(fg, (a22 + tnt), cnt, off)) exit(EXIT_FAILURE); if ((ssize_t)cnt != pread(fh, (a21 + tnt), cnt, off)) exit(EXIT_FAILURE); } (void)fprintf(stdout, ","); (void)fflush(stdout); uint64_t be[2u] = { UINT64_C(0), UINT64_C(0) }; be[0u] = rdtsc_beg(rd); #ifdef _OPENMP #pragma omp parallel for default(none) shared(n,a11,a22,a21,l1,l2,cs1,sn1) 
#endif /* _OPENMP */ for (size_t i = 0u; i < n; ++i) { #ifdef USE_INL _slaev2((a11 + i), (a21 + i), (a22 + i), (l1 + i), (l2 + i), (cs1 + i), (sn1 + i)); #else /* !USE_INL */ LAPACK_S(laev2)((a11 + i), (a21 + i), (a22 + i), (l1 + i), (l2 + i), (cs1 + i), (sn1 + i)); #endif /* ?USE_INL */ } be[1u] = rdtsc_end(rd); (void)fprintf(stdout, "%15.9Lf,", tsc_lap(hz, be[0u], be[1u])); (void)fflush(stdout); wide o = W_ZERO, r = W_ZERO; #ifdef _OPENMP #pragma omp parallel for default(none) shared(n,a11,a22,a21,cs1,sn1,l1,l2) reduction(max:o,r) #endif /* _OPENMP */ for (size_t i = 0u; i < n; ++i) { wide AE = W_ZERO, AN = W_ZERO; o = fmaxw(o, worr(cs1[i], sn1[i])); r = fmaxw(r, wrer(a11[i], a22[i], a21[i], cs1[i], sn1[i], l1[i], l2[i], &AE, &AN)); } (void)fprintf(stdout, "%s,", xtoa(a, (long double)o)); (void)fprintf(stdout, "%s", xtoa(a, (long double)r)); (void)fflush(stdout); #ifdef _OPENMP #pragma omp parallel default(none) shared(fk,fl,cs1,sn1,n,n_t,cnt,jn) #endif /* _OPENMP */ { const int mt = #ifdef _OPENMP omp_get_thread_num() #else /* !_OPENMP */ 0 #endif /* ?_OPENMP */ ; const size_t tnt = mt * n_t; const off_t off = (jn + tnt) * sizeof(float); if ((ssize_t)cnt != pread(fk, (cs1 + tnt), cnt, off)) exit(EXIT_FAILURE); if ((ssize_t)cnt != pread(fl, (sn1 + tnt), cnt, off)) exit(EXIT_FAILURE); } (void)fprintf(stdout, ","); (void)fflush(stdout); wide x = W_ZERO, m = W_ZERO; r = W_ZERO; #ifdef _OPENMP #pragma omp parallel for default(none) shared(n,l1,l2,cs1,sn1) reduction(max:r,x,m) #endif /* _OPENMP */ for (size_t i = 0u; i < n; ++i) { wide AE = W_ZERO, AN = W_ZERO; const wide RE = wlam(l1[i], l2[i], cs1[i], sn1[i], &AE, &AN); r = fmaxw(r, RE); x = fmaxw(x, AE); m = fmaxw(m, AN); } (void)fprintf(stdout, "%s,", xtoa(a, (long double)r)); (void)fprintf(stdout, "%s,", xtoa(a, (long double)x)); (void)fprintf(stdout, "%s\n", xtoa(a, (long double)m)); (void)fflush(stdout); } (void)close(fh); (void)close(fg); (void)close(ff); (void)close(fl); (void)close(fk); free(l2); free(l1); free(sn1); free(cs1); free(a21); free(a22); free(a11); free(fn); return EXIT_SUCCESS; }
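The kernel this driver times in batches is LAPACK's SLAEV2, the 2x2 symmetric eigensolver. A single-matrix sketch follows, assuming a Fortran LAPACK with the common trailing-underscore symbol name is linked; the argument order mirrors the call in the timed loop above.

// Single-matrix sketch of the routine the batched loop above times:
// SLAEV2 computes eigenvalues and the rotation (cs, sn) for [[a, b], [b, c]].
#include <cstdio>

extern "C" void slaev2_(const float *a, const float *b, const float *c,
                        float *rt1, float *rt2, float *cs1, float *sn1);

int main() {
    const float a11 = 2.0f, a21 = 1.0f, a22 = 3.0f;   // [[a11, a21], [a21, a22]]
    float l1, l2, cs1, sn1;
    slaev2_(&a11, &a21, &a22, &l1, &l2, &cs1, &sn1);  // same argument order as above
    std::printf("eigenvalues %g, %g; rotation (c, s) = (%g, %g)\n", l1, l2, cs1, sn1);
    return 0;
}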
sphgrid.h
#ifndef SPHGRID_H #define SPHGRID_H #include <omp.h> #include <vector> #include <map> #include <unordered_map> #include <cmath> #include <algorithm> #include <glm/vec3.hpp> #include <glm/mat3x3.hpp> #include <glm/geometric.hpp> #include <glm/gtx/scalar_multiplication.hpp> #include "sphparticle.h" #include "sphmap.h" #include "sphparticle.h" #include "marching_cube.h" #define PI 3.14159265358979323846264338327950288 class SpatialGrid { private: int imax, jmax, kmax; vec3 lb, ub; float g_d, smtRadius; float* densityTable; vec3* normTable; FluidParticle** gridPs; float f(float q) { float ans; if (0 <= q && q < 1) ans = 2.0 / 3.0 - pow(q, 2) + 0.5 * pow(q, 3); else if (1 <= q && q < 2) ans = 1.0 / 6.0 * pow(2 - q, 3); else ans = 0.0; return ans * 3.0 / 2.0 / PI; } float df(float q) { float ans; if (0 <= q && q < 1) ans = -2 * q + 3.0 / 2.0 * pow(q, 2); else if (1 <= q && q < 2) ans = -0.5 * pow(2 - q, 2); else ans = 0.0; return ans * 3.0 / 2.0 / PI; } float W(FluidParticle* x_i, FluidParticle* x_j) { float q = distance(x_i->getPosition(), x_j->getPosition()) / smtRadius; return 1.0 / pow(smtRadius, 3) * f(q); } vec3 dW(FluidParticle* x_i, FluidParticle* x_j) { float q = distance(x_i->getPosition(), x_j->getPosition()) / smtRadius; vec3 norm = x_i->getPosition() - x_j->getPosition(); norm = norm / sqrt(dot(norm, norm)); return 1.0 / pow(smtRadius, 4) * df(q) * norm; } vec3 XYZ2Pos(int3 xyz) { return vec3(lb.x + xyz.x * g_d, lb.y + xyz.y * g_d, lb.z + xyz.z * g_d); } int3 index2XYZ(int index) { return int3(index / jmax / kmax, index / kmax % jmax, index % kmax); } vec3 index2Pos(int index) { return XYZ2Pos(index2XYZ(index)); } int XYZ2Index(int3 xyz) { return xyz.x * jmax * kmax + xyz.y * kmax + xyz.z; } public: SpatialGrid(vec3 lb, vec3 ub, float g_d, float smtRadius) { this->lb = lb; this->ub = ub; this->g_d = g_d; this->smtRadius = smtRadius; this->imax = (ub.x - lb.x) / g_d + 1; this->jmax = (ub.y - lb.y) / g_d + 1; this->kmax = (ub.z - lb.z) / g_d + 1; densityTable = new float[imax * jmax * kmax]; normTable = new vec3[imax * jmax * kmax]; gridPs = new FluidParticle * [imax * jmax * kmax]; for (int i = 0; i < imax * jmax * kmax; i++) { int3 xyz = index2XYZ(i); vec3 pos = XYZ2Pos(xyz); gridPs[i] = new FluidParticle(pos, 0.0f); densityTable[i] = 0.0f; normTable[i] = vec3(0.0f, 0.0f, 0.0f); } } void computeDensityTable(SpatialHashTable* hashTable) { for (int i = 0; i < imax * jmax * kmax; i++) { hashTable->computeNeighborBlocks(gridPs[i]); } #pragma omp parallel for for (int i = 0; i < imax * jmax * kmax; i++) { hashTable->computeNeighbors(gridPs[i]); gridPs[i]->setBuiltNeighborsFlag(true); } for (int i = 0; i < imax * jmax * kmax; i++) { float dens = 0; vec3 norm(0, 0, 0); vector<FluidParticle*>* neighbors = gridPs[i]->getNeighbors(true); for (int j = 0; j < neighbors->size(); j++) { FluidParticle* p_j = neighbors->at(j); dens += p_j->getMass() * W(gridPs[i], p_j); norm += p_j->getMass() * dW(gridPs[i], p_j); } densityTable[i] = dens; normTable[i] = -norm; } } int computeTriangleFacets(float* res, float isolevel) { vector<vec3> temp; int numTriangle = 0; int cube_edgeToVerts[12][2] = { {0,1}, {1,2}, {2,3}, {3,0}, {4,5}, {5,6}, {6,7}, {7,4}, {0,4}, {1,5}, {2,6}, {3,7}, }; for (int i = 0 ; i < imax * jmax * kmax ; i++) { int3 xyz = index2XYZ(i); vec3 pos = XYZ2Pos(xyz); if (xyz.x == imax - 1 || xyz.y == jmax - 1 || xyz.z == kmax - 1) continue; int3 indices[8] = { xyz + int3(0,0,0), xyz + int3(0,1,0), xyz + int3(1,1,0), xyz + int3(1,0,0), xyz + int3(0,0,1), xyz + int3(0,1,1), xyz + int3(1,1,1), 
xyz + int3(1,0,1) }; float dens[8]; vec3 poss[8]; vec3 norm[8]; for (int j = 0 ; j < 8 ; j++) { poss[j] = XYZ2Pos(indices[j]); dens[j] = densityTable[XYZ2Index(indices[j])]; norm[j] = normTable[XYZ2Index(indices[j])]; } numTriangle += Polygonise(poss, norm, dens, isolevel, &temp); } for (int i = 0; i < temp.size(); i++) { res[3 * i + 0] = temp[i].x; res[3 * i + 1] = temp[i].y; res[3 * i + 2] = temp[i].z; } return numTriangle; } }; #endif
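SpatialGrid::f() is the cubic-spline SPH kernel with the 3/(2*pi) normalization, so W(r, h) = f(|r|/h)/h^3 integrates to 1 over R^3. A small standalone check of that normalization, integrating 4*pi*q^2*f(q) over [0, 2] with a midpoint rule (illustrative only, not part of the header):

// Standalone check of the kernel normalization used in SpatialGrid::f().
#include <cmath>
#include <cstdio>

static const double PI = 3.14159265358979323846;

static double f(double q) {                  // same piecewise cubic as sphgrid.h
    double ans;
    if (q < 1.0)      ans = 2.0 / 3.0 - q * q + 0.5 * q * q * q;
    else if (q < 2.0) ans = (1.0 / 6.0) * std::pow(2.0 - q, 3);
    else              ans = 0.0;
    return ans * 3.0 / 2.0 / PI;
}

int main() {
    const int n = 200000;
    const double dq = 2.0 / n;
    double integral = 0.0;
    for (int i = 0; i < n; ++i) {
        double q = (i + 0.5) * dq;           // midpoint rule on [0, 2]
        integral += 4.0 * PI * q * q * f(q) * dq;
    }
    std::printf("kernel integral = %.6f (expected 1)\n", integral);
    return 0;
}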
.body.c
#define S1(zT0,zT1,t,i) b[i]=0.33333*(a[i-1]+a[i]+a[i+1]); #define S2(zT0,zT1,t,j) a[j]=b[j]; int t0, t1, t2, t3, t4; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; /* Generated from PLUTO-produced CLooG file by CLooG v0.14.1 64 bits in 0.04s. */ for (t0=-1;t0<=floord(3*T+N-4,1024);t0++) { lb1=max(max(ceild(2048*t0-1021,3072),ceild(1024*t0-T+1,1024)),0); ub1=min(min(floord(1024*t0+1023,1024),floord(2*T+N-3,1024)),floord(2048*t0+N+2045,3072)); #pragma omp parallel for shared(t0,lb1,ub1) private(t1,t2,t3,t4) for (t1=lb1; t1<=ub1; t1++) { if ((t0 <= floord(3072*t1-N+1,2048)) && (t1 >= ceild(N-1,1024))) { if ((-N+1)%2 == 0) { if ((-N+1)%2 == 0) { S2(t0-t1,t1,(1024*t1-N+1)/2,N-2) ; } } } for (t2=max(max(ceild(1024*t1-N+2,2),1024*t0-1024*t1),0);t2<=min(min(min(512*t1-2,T-1),1024*t0-1024*t1+1023),floord(1024*t1-N+1024,2));t2++) { for (t3=1024*t1;t3<=2*t2+N-2;t3++) { S1(t0-t1,t1,t2,-2*t2+t3) ; S2(t0-t1,t1,t2,-2*t2+t3-1) ; } S2(t0-t1,t1,t2,N-2) ; } for (t2=max(max(512*t1-1,1024*t0-1024*t1),0);t2<=min(min(T-1,1024*t0-1024*t1+1023),floord(1024*t1-N+1024,2));t2++) { S1(t0-t1,t1,t2,2) ; for (t3=2*t2+3;t3<=2*t2+N-2;t3++) { S1(t0-t1,t1,t2,-2*t2+t3) ; S2(t0-t1,t1,t2,-2*t2+t3-1) ; } S2(t0-t1,t1,t2,N-2) ; } for (t2=max(max(1024*t0-1024*t1,0),ceild(1024*t1-N+1025,2));t2<=min(min(T-1,1024*t0-1024*t1+1023),512*t1-2);t2++) { for (t3=1024*t1;t3<=1024*t1+1023;t3++) { S1(t0-t1,t1,t2,-2*t2+t3) ; S2(t0-t1,t1,t2,-2*t2+t3-1) ; } } for (t2=max(max(max(512*t1-1,1024*t0-1024*t1),0),ceild(1024*t1-N+1025,2));t2<=min(min(T-1,512*t1+510),1024*t0-1024*t1+1023);t2++) { S1(t0-t1,t1,t2,2) ; for (t3=2*t2+3;t3<=1024*t1+1023;t3++) { S1(t0-t1,t1,t2,-2*t2+t3) ; S2(t0-t1,t1,t2,-2*t2+t3-1) ; } } } } /* End of CLooG code */
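The S1/S2 macros encode a three-point 1-D stencil, so the untiled loop nest this PLUTO/CLooG output appears to have been generated from (the usual jacobi-1d-imper example; the exact bounds are a best guess from the generated bounds) looks like the sketch below. The tiled code performs the same updates, reordered into 1024-wide time/space tiles.

// Untiled reference form of the stencil above (bounds are an assumption).
#include <vector>

void jacobi1d_reference(int T, int N, double *a, double *b) {
    for (int t = 0; t < T; ++t) {
        for (int i = 2; i < N - 1; ++i)      // S1(t, i)
            b[i] = 0.33333 * (a[i - 1] + a[i] + a[i + 1]);
        for (int j = 2; j < N - 1; ++j)      // S2(t, j)
            a[j] = b[j];
    }
}

int main() {
    std::vector<double> a(1000, 1.0), b(1000, 0.0);
    jacobi1d_reference(10, 1000, a.data(), b.data());
    return 0;
}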
os_cpu.c
int cpu_pid(void); int cpu_cores(void); double cpu_usage(void); #ifdef CPU_DEMO #pragma once #ifdef _WIN32 #include <windows.h> static ULONGLONG cpu_diff_time_(const FILETIME one, const FILETIME two) { LARGE_INTEGER a, b; a.LowPart = one.dwLowDateTime; a.HighPart = one.dwHighDateTime; b.LowPart = two.dwLowDateTime; b.HighPart = two.dwHighDateTime; return a.QuadPart - b.QuadPart; } #elif defined __linux__ #include <sched.h> #else #include <unistd.h> #endif // CPU currently used by current process: // src: https://stackoverflow.com/questions/63166/how-to-determine-cpu-and-memory-consumption-from-inside-a-process // src: https://stackoverflow.com/questions/23143693/retrieving-cpu-load-percent-total-in-windows-with-c double cpu_usage(void) { #ifndef _WIN32 return 0; #else static double last_ok = 0; static FILETIME prevSysIdle, prevSysKernel, prevSysUser; FILETIME sysIdle, sysKernel, sysUser; // sysKernel include IdleTime if (GetSystemTimes(&sysIdle, &sysKernel, &sysUser) == 0) { // GetSystemTimes func FAILED return value is zero; return last_ok; } if (prevSysIdle.dwLowDateTime != 0 && prevSysIdle.dwHighDateTime != 0) { ULONGLONG sysIdleDiff, sysKernelDiff, sysUserDiff; sysIdleDiff = cpu_diff_time_(sysIdle, prevSysIdle); sysKernelDiff = cpu_diff_time_(sysKernel, prevSysKernel); sysUserDiff = cpu_diff_time_(sysUser, prevSysUser); ULONGLONG sysTotal = sysKernelDiff + sysUserDiff; ULONGLONG kernelTotal = sysKernelDiff - sysIdleDiff; // kernelTime - IdleTime = kernelTime, because sysKernel include IdleTime if (sysTotal > 0) { // sometimes kernelTime > idleTime last_ok = (double)(((kernelTotal + sysUserDiff) * 100.0) / sysTotal); } } prevSysIdle = sysIdle; prevSysKernel = sysKernel; prevSysUser = sysUser; return last_ok; #endif } int cpu_cores(void) { #if defined __cplusplus return (int)std::thread::hardware_concurrency(); #elif defined _WIN32 DWORD_PTR pm, sm; if( GetProcessAffinityMask(GetCurrentProcess(), &pm, &sm) ) if( pm ) { int count = 0; while( pm ) { ++count; pm &= pm - 1; } return count; } { SYSTEM_INFO si; GetSystemInfo(&si); return (int)si.dwNumberOfProcessors; } #elif defined __linux__ cpu_set_t prevmask, testmask; CPU_ZERO(&prevmask); CPU_ZERO(&testmask); sched_getaffinity(0, sizeof(prevmask), &prevmask); //Get current mask sched_setaffinity(0, sizeof(testmask), &testmask); //Set zero mask sched_getaffinity(0, sizeof(testmask), &testmask); //Get mask for all CPUs sched_setaffinity(0, sizeof(prevmask), &prevmask); //Reset current mask int num = CPU_COUNT(&testmask); return (num > 1 ? num : 1); #elif defined __unix__ // unix int count = sysconf(_SC_NPROCESSORS_ONLN); return count > 0 ? count : 1; #else // omp int cores = 0; #pragma omp parallel { #pragma omp atomic ++cores; } return cores; #endif } int cpu_pid(void) { #ifdef _WIN32 return _getpid(); #else return getpid(); #endif } #ifdef CPU_DEMO #include <stdio.h> int main() { printf("process-id: %d\n", cpu_pid()); printf("%d cpus\n", cpu_cores()); for( int i = 0; i < 100000000; ++i) { float f = sqrt(1); } printf("%5.2f%% usage\n", cpu_usage()); } #define main main__ #endif // CPU_DEMO #endif // CPU_C
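The Windows branch of cpu_usage() turns two GetSystemTimes() samples into a busy percentage as (kernelDelta - idleDelta + userDelta) / (kernelDelta + userDelta), since the reported kernel time already includes idle time. A tiny worked example of that arithmetic with made-up deltas:

// Worked example of the percentage cpu_usage() derives from two samples;
// the deltas below are hypothetical values in 100 ns units.
#include <cstdio>

int main() {
    const double idleDelta = 6000000.0, kernelDelta = 8000000.0, userDelta = 2000000.0;
    const double total = kernelDelta + userDelta;             // kernel already includes idle
    const double busy  = (kernelDelta - idleDelta) + userDelta;
    std::printf("CPU usage: %.1f%%\n", 100.0 * busy / total); // 40.0%
    return 0;
}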
polar_utils.h
// // Created by valentinnodan on 11/5/20. // #pragma once #include <cmath> #include <vector> #include <iostream> #include <chrono> #include <sstream> #include "Symbol.h" #include "Transform.h" #include "../construct/PolarCodeConstruct.h" #include "../decoder/PolarDecoderBP.h" #include "../coder/PolarCoder.h" #include "../ldpc/DecoderBP.h" #include "../decoder/PolarDecoderLP.h" inline std::vector<size_t> genReversedIndex(size_t i) { std::vector<size_t> res; res.resize(i); res[0] = 0; for (size_t i1 = 1; i1 < i; i1 <<= 1u) { for (size_t j = 0; j < i1; ++j) { res[j] <<= 1u; res[j + i1] = res[j] + 1; } } return res; } inline std::pair<Message, Message> getOddsEvens(Message const &u, size_t uLength) { auto resEvens = std::vector<Symbol>(uLength / 2); auto resOddEvens = std::vector<Symbol>(uLength / 2); for (size_t i = 1; i < uLength; i += 2) { auto ind = (i - 1) / 2; resEvens[ind] = u[i]; resOddEvens[ind].symbol = (int) (u[i].symbol + u[i - 1].symbol) % 2; } return std::pair<Message, Message>(resOddEvens, resEvens); } inline void printWord(Message const &cW) { std::cout << "Word: "; for (size_t i = 0; i < cW.size(); ++i) { std::cout << cW[i] << " "; } std::cout << "\n"; } inline Message getRandomWord(size_t length) { auto res = Message(length); for (size_t i = 0; i < length; ++i) { res[i].symbol = rand() % 2; // res[i].symbol = 0; } return res; } inline int compareWords(Message const &a, Message const &b) { assert(a.size() == b.size()); int res = 0; for (size_t j = 0; j < a.size(); ++j) { double diff = a[j].symbol - b[j].symbol; if (std::abs(diff) > Symbol::EPS) { res++; } } return res; } inline std::string runSimulation(size_t N, size_t K, size_t wordsAmount, double alpha, double mu, bool isSystematic, bool isBER, bool isLP) { constexpr size_t LB = 2; constexpr size_t RB = 11; constexpr size_t SIZE = RB - LB; std::vector<std::string> bers = std::vector<std::string>(SIZE); std::vector<std::string> fers = std::vector<std::string>(SIZE); auto reversedIndexes = genReversedIndex(N); auto revPlace = std::vector<size_t>(N); for (size_t i = 0; i < N; i++) { revPlace[reversedIndexes[i]] = i; } auto frozen = Message(N - K); auto G = PolarCoder::getGN(N); #pragma omp parallel for for (size_t i = LB; i < RB; i++) { double ii = (double) i / 2; auto gaussianChannel = Channel(ii); auto cPC = PolarCodeConstruct(ii); auto A = cPC.construct(N, K); auto AA = std::set<size_t>(); for (size_t j : A) { AA.emplace(reversedIndexes[j]); } A = AA; auto H = transform(N, A); auto pair = DecoderBP::prepare(H); auto decoder = PolarDecoderLP(gaussianChannel, pair, N, K); if (isLP) { decoder.setAlpha(alpha); decoder.setMu(mu); } int e = 0; int eBER = 0; int eFER = 0; size_t wI = 0; while (!(eFER > 15 && wI > wordsAmount)) { // std::cout << eBER << " " << eFER << " " << wI << std::endl; wI++; auto myMsg = getRandomWord(K); auto cW = PolarCoder::encode(myMsg, A, frozen, reversedIndexes, false); if (isSystematic) { auto cc = Message(); for (size_t t = 0; t < N; t++) { if (A.count(t) == 1) { cc.push_back(cW[t]); } } cW = PolarCoder::encode(cc, A, frozen, reversedIndexes, false); } auto gaussWord = gaussianChannel.Gauss(cW, N, K); auto decodedWord = decoder.decode(gaussWord, A); if (!isSystematic) { decodedWord = PolarCoder::encode(decodedWord, A, frozen, reversedIndexes, false); } auto dW = Message(); for (size_t p = 0; p < N; p++) { if (A.count(p) == 1) { dW.emplace_back(decodedWord[p]); } } if (compareWords(dW, myMsg) > 0) { // std::cout << compareWords(dW, myMsg) << std::endl; if (isBER) { e += compareWords(dW, myMsg); } else 
{ e += 1; } eBER += compareWords(dW, myMsg); eFER += 1; } } std::ostringstream ssBER; std::ostringstream ssFER; ssBER << ii << " " << (double) eBER / (wI * K); bers[i - LB] = (ssBER.str()); ssFER << ii << " " << (double) eFER / wI; fers[i - LB] = (ssFER.str()); // std::cout << ii << " " << (double) eBER / (wI * K) << std::endl; } std::ostringstream output; if (isLP) { output << "alpha: " << alpha << std::endl; output << "mu: " << mu << std::endl; } output << "BER" << std::endl; for (auto &ber : bers) { output << ber << std::endl; } output << "FER" << std::endl; for (auto &fer : fers) { output << fer << std::endl; } return output.str(); } inline void runSimulationSC(size_t N, size_t K, size_t wordsAmount, bool isBER) { auto reversedIndexes = genReversedIndex(N); auto revPlace = std::vector<size_t>(N); for (size_t i = 0; i < N; i++) { revPlace[reversedIndexes[i]] = i; } auto frozen = Message(N - K); auto H = transform(N, std::set<size_t>()); auto G = PolarCoder::getGN(N); auto pair = DecoderBP::prepare(H); for (size_t i = 2; i < 20; i++) { double ii = (double) i / 2; auto gaussianChannel = Channel(ii); auto cPC = PolarCodeConstruct(ii); auto A = cPC.construct(N, K); auto decoder = PolarDecoderSC(gaussianChannel, N, K); int e = 0; for (size_t j = 0; j < wordsAmount; j++) { auto myMsg = getRandomWord(K); auto cW = PolarCoder::encode(myMsg, A, frozen, reversedIndexes, true); auto gaussWord = gaussianChannel.Gauss(cW, N, K); auto decodedWord = decoder.decode(gaussWord, A); auto dW = Message(); for (size_t p = 0; p < N; p++) { if (A.count(p) == 1) { dW.emplace_back(decodedWord[p]); } } if (compareWords(dW, myMsg) > 0) { if (isBER) { e += compareWords(dW, myMsg); } else { e += 1; } } } size_t totalAmount; if (isBER) { totalAmount = wordsAmount * K; } else { totalAmount = wordsAmount; } std::cout << ii << " " << (double) e / totalAmount << std::endl; } }
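genReversedIndex() builds the bit-reversal permutation used above to remap the information set A before encoding. A standalone sketch showing its output for N = 8:

// Standalone copy of the doubling construction from genReversedIndex(), printing
// the permutation for N = 8; each index k maps to k with its log2(N) bits reversed.
#include <cstdio>
#include <vector>

std::vector<size_t> genReversedIndex(size_t n) {
    std::vector<size_t> res(n, 0);
    for (size_t half = 1; half < n; half <<= 1u) {
        for (size_t j = 0; j < half; ++j) {
            res[j] <<= 1u;
            res[j + half] = res[j] + 1;
        }
    }
    return res;
}

int main() {
    for (size_t v : genReversedIndex(8))
        std::printf("%zu ", v);              // 0 4 2 6 1 5 3 7
    std::printf("\n");
    return 0;
}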
ordered_doacross_codegen.c
// RUN: %clang_cc1 -verify -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NORMAL // RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,CHECK-NORMAL // RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-IRBUILDER // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -triple x86_64-unknown-unknown -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,CHECK-IRBUILDER // RUN: %clang_cc1 -verify -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK: [[KMP_DIM:%.+]] = type { i64, i64, i64 } extern int n; int a[10], b[10], c[10], d[10]; void foo(); // CHECK-LABEL: @main() int main() { int i; // CHECK: [[DIMS:%.+]] = alloca [1 x [[KMP_DIM]]], // CHECK-NORMAL: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]]) // CHECK: icmp // CHECK-NEXT: br i1 % // CHECK: [[CAST:%.+]] = bitcast [1 x [[KMP_DIM]]]* [[DIMS]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 [[CAST]], i8 0, i64 24, i1 false) // CHECK: [[DIM:%.+]] = getelementptr inbounds [1 x [[KMP_DIM]]], [1 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0 // CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 1 // CHECK: store i64 %{{.+}}, i64* % // CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 2 // CHECK: store i64 1, i64* % // CHECK: [[DIM:%.+]] = getelementptr inbounds [1 x [[KMP_DIM]]], [1 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0 // CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIM]] to i8* // CHECK-NORMAL: call void @__kmpc_doacross_init([[IDENT]], i32 [[GTID]], i32 1, i8* [[CAST]]) // CHECK-NORMAL: call void @__kmpc_for_static_init_4(%struct.ident_t* @{{.+}}, i32 [[GTID]], i32 33, i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1) #pragma omp for ordered(1) for (i = 0; i < n; ++i) { a[i] = b[i] + 1; foo(); // CHECK: call void (...) [[FOO:.+]]( // CHECK: load i32, i32* [[I:%.+]], // CHECK-NEXT: sub nsw i32 %{{.+}}, 0 // CHECK-NEXT: sdiv i32 %{{.+}}, 1 // CHECK-NEXT: sext i32 %{{.+}} to i64 // CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT:%.+]], i64 0, i64 0 // CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]], // CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT]], i64 0, i64 0 // CHECK-NORMAL-NEXT: call void @__kmpc_doacross_post([[IDENT]], i32 [[GTID]], i64* [[TMP]]) // CHECK-IRBUILDER-NEXT: [[GTID1:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]]) // CHECK-IRBUILDER-NEXT: call void @__kmpc_doacross_post([[IDENT]], i32 [[GTID1]], i64* [[TMP]]) #pragma omp ordered depend(source) c[i] = c[i] + 1; foo(); // CHECK: call void (...) 
[[FOO]] // CHECK: load i32, i32* [[I]], // CHECK-NEXT: sub nsw i32 %{{.+}}, 2 // CHECK-NEXT: sub nsw i32 %{{.+}}, 0 // CHECK-NEXT: sdiv i32 %{{.+}}, 1 // CHECK-NEXT: sext i32 %{{.+}} to i64 // CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT:%.+]], i64 0, i64 0 // CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]], // CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT]], i64 0, i64 0 // CHECK-NORMAL-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID]], i64* [[TMP]]) // CHECK-IRBUILDER-NEXT: [[GTID2:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]]) // CHECK-IRBUILDER-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID2]], i64* [[TMP]]) #pragma omp ordered depend(sink : i - 2) d[i] = a[i - 2]; } // CHECK: call void @__kmpc_for_static_fini( // CHECK-NORMAL: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]]) // CHECK: ret i32 0 return 0; } #endif // HEADER
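Stripped of the FileCheck annotations, the doacross pattern this test exercises compiles and runs on its own with any OpenMP 4.5 compiler. The sketch below is that minimal form, with a bounds guard added so d[i] = a[i-2] stays in range for the first two iterations.

// Minimal runnable doacross loop (compile with e.g. clang -fopenmp): iteration i
// posts after writing a[i], and waits on iteration i-2 before reading a[i-2].
#include <cstdio>

int main() {
    const int n = 10;
    int a[10], b[10], c[10], d[10];
    for (int i = 0; i < n; ++i) { a[i] = 0; b[i] = i; c[i] = 0; d[i] = 0; }
#pragma omp parallel for ordered(1)
    for (int i = 0; i < n; ++i) {
        a[i] = b[i] + 1;
#pragma omp ordered depend(source)
        c[i] = c[i] + 1;
#pragma omp ordered depend(sink : i - 2)
        d[i] = (i >= 2) ? a[i - 2] : 0;      // guard keeps the read in bounds
    }
    for (int i = 0; i < n; ++i) std::printf("%d ", d[i]);
    std::printf("\n");
    return 0;
}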
data.h
/*! * Copyright (c) 2015 by Contributors * \file data.h * \brief The input data structure of xgboost. * \author Tianqi Chen */ #ifndef XGBOOST_DATA_H_ #define XGBOOST_DATA_H_ #include <dmlc/base.h> #include <dmlc/data.h> #include <dmlc/serializer.h> #include <xgboost/base.h> #include <xgboost/span.h> #include <xgboost/host_device_vector.h> #include <memory> #include <numeric> #include <algorithm> #include <string> #include <utility> #include <vector> namespace xgboost { // forward declare dmatrix. class DMatrix; /*! \brief data type accepted by xgboost interface */ enum class DataType : uint8_t { kFloat32 = 1, kDouble = 2, kUInt32 = 3, kUInt64 = 4, kStr = 5 }; enum class FeatureType : uint8_t { kNumerical, kCategorical }; /*! * \brief Meta information about dataset, always sit in memory. */ class MetaInfo { public: /*! \brief number of data fields in MetaInfo */ static constexpr uint64_t kNumField = 11; /*! \brief number of rows in the data */ uint64_t num_row_{0}; // NOLINT /*! \brief number of columns in the data */ uint64_t num_col_{0}; // NOLINT /*! \brief number of nonzero entries in the data */ uint64_t num_nonzero_{0}; // NOLINT /*! \brief label of each instance */ HostDeviceVector<bst_float> labels_; // NOLINT /*! * \brief the index of begin and end of a group * needed when the learning task is ranking. */ std::vector<bst_group_t> group_ptr_; // NOLINT /*! \brief weights of each instance, optional */ HostDeviceVector<bst_float> weights_; // NOLINT /*! * \brief initialized margins, * if specified, xgboost will start from this init margin * can be used to specify initial prediction to boost from. */ HostDeviceVector<bst_float> base_margin_; // NOLINT /*! * \brief lower bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT /*! * \brief upper bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT /*! * \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q" */ std::vector<std::string> feature_type_names; /*! * \brief Name for each feature. */ std::vector<std::string> feature_names; /* * \brief Type of each feature. Automatically set when feature_type_names is specifed. */ HostDeviceVector<FeatureType> feature_types; /* * \brief Weight of each feature, used to define the probability of each feature being * selected when using column sampling. */ HostDeviceVector<float> feature_weigths; /*! \brief default constructor */ MetaInfo() = default; MetaInfo(MetaInfo&& that) = default; MetaInfo& operator=(MetaInfo&& that) = default; MetaInfo& operator=(MetaInfo const& that) = delete; /*! * \brief Validate all metainfo. */ void Validate(int32_t device) const; MetaInfo Slice(common::Span<int32_t const> ridxs) const; /*! * \brief Get weight of each instances. * \param i Instance index. * \return The weight. */ inline bst_float GetWeight(size_t i) const { return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f; } /*! 
\brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */ inline const std::vector<size_t>& LabelAbsSort() const { if (label_order_cache_.size() == labels_.Size()) { return label_order_cache_; } label_order_cache_.resize(labels_.Size()); std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0); const auto& l = labels_.HostVector(); XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(), [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);}); return label_order_cache_; } /*! \brief clear all the information */ void Clear(); /*! * \brief Load the Meta info from binary stream. * \param fi The input stream */ void LoadBinary(dmlc::Stream* fi); /*! * \brief Save the Meta info to binary stream * \param fo The output stream. */ void SaveBinary(dmlc::Stream* fo) const; /*! * \brief Set information in the meta info. * \param key The key of the information. * \param dptr The data pointer of the source array. * \param dtype The type of the source data. * \param num Number of elements in the source array. */ void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num); /*! * \brief Set information in the meta info with array interface. * \param key The key of the information. * \param interface_str String representation of json format array interface. * * [ column_0, column_1, ... column_n ] * * Right now only 1 column is permitted. */ void SetInfo(const char* key, std::string const& interface_str); void GetInfo(char const* key, bst_ulong* out_len, DataType dtype, const void** out_dptr) const; void SetFeatureInfo(const char *key, const char **info, const bst_ulong size); void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const; /* * \brief Extend with other MetaInfo. * * \param that The other MetaInfo object. * * \param accumulate_rows Whether rows need to be accumulated in this function. If * client code knows number of rows in advance, set this parameter to false. */ void Extend(MetaInfo const& that, bool accumulate_rows); private: /*! \brief argsort of labels */ mutable std::vector<size_t> label_order_cache_; }; /*! \brief Element from a sparse vector */ struct Entry { /*! \brief feature index */ bst_feature_t index; /*! \brief feature value */ bst_float fvalue; /*! \brief default constructor */ Entry() = default; /*! * \brief constructor with index and value * \param index The feature or row index. * \param fvalue The feature value. */ XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {} /*! \brief reversely compare feature values */ inline static bool CmpValue(const Entry& a, const Entry& b) { return a.fvalue < b.fvalue; } inline bool operator==(const Entry& other) const { return (this->index == other.index && this->fvalue == other.fvalue); } }; /*! * \brief Parameters for constructing batches. */ struct BatchParam { /*! \brief The GPU device to use. */ int gpu_id; /*! \brief Maximum number of bins per feature for histograms. */ int max_bin{0}; /*! \brief Page size for external memory mode. 
*/ size_t gpu_page_size; BatchParam() = default; BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0) : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {} inline bool operator!=(const BatchParam& other) const { return gpu_id != other.gpu_id || max_bin != other.max_bin || gpu_page_size != other.gpu_page_size; } }; struct HostSparsePageView { using Inst = common::Span<Entry const>; common::Span<bst_row_t const> offset; common::Span<Entry const> data; Inst operator[](size_t i) const { auto size = *(offset.data() + i + 1) - *(offset.data() + i); return {data.data() + *(offset.data() + i), static_cast<Inst::index_type>(size)}; } size_t Size() const { return offset.size() == 0 ? 0 : offset.size() - 1; } }; /*! * \brief In-memory storage unit of sparse batch, stored in CSR format. */ class SparsePage { public: // Offset for each row. HostDeviceVector<bst_row_t> offset; /*! \brief the data of the segments */ HostDeviceVector<Entry> data; size_t base_rowid {0}; /*! \brief an instance of sparse vector in the batch */ using Inst = common::Span<Entry const>; HostSparsePageView GetView() const { return {offset.ConstHostSpan(), data.ConstHostSpan()}; } /*! \brief constructor */ SparsePage() { this->Clear(); } /*! \return Number of instances in the page. */ inline size_t Size() const { return offset.Size() == 0 ? 0 : offset.Size() - 1; } /*! \return estimation of memory cost of this page */ inline size_t MemCostBytes() const { return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry); } /*! \brief clear the page */ inline void Clear() { base_rowid = 0; auto& offset_vec = offset.HostVector(); offset_vec.clear(); offset_vec.push_back(0); data.HostVector().clear(); } /*! \brief Set the base row id for this page. */ inline void SetBaseRowId(size_t row_id) { base_rowid = row_id; } SparsePage GetTranspose(int num_columns) const; void SortRows() { auto ncol = static_cast<bst_omp_uint>(this->Size()); dmlc::OMPException exc; #pragma omp parallel for schedule(dynamic, 1) for (bst_omp_uint i = 0; i < ncol; ++i) { exc.Run([&]() { if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) { std::sort( this->data.HostVector().begin() + this->offset.HostVector()[i], this->data.HostVector().begin() + this->offset.HostVector()[i + 1], Entry::CmpValue); } }); } exc.Rethrow(); } /** * \brief Pushes external data batch onto this page * * \tparam AdapterBatchT * \param batch * \param missing * \param nthread * * \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns. */ template <typename AdapterBatchT> uint64_t Push(const AdapterBatchT& batch, float missing, int nthread); /*! * \brief Push a sparse page * \param batch the row page */ void Push(const SparsePage &batch); /*! * \brief Push a SparsePage stored in CSC format * \param batch The row batch to be pushed */ void PushCSC(const SparsePage& batch); }; class CSCPage: public SparsePage { public: CSCPage() : SparsePage() {} explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class SortedCSCPage : public SparsePage { public: SortedCSCPage() : SparsePage() {} explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class EllpackPageImpl; /*! * \brief A page stored in ELLPACK format. * * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid * including CUDA-specific implementation details in the header. */ class EllpackPage { public: /*! 
* \brief Default constructor. * * This is used in the external memory case. An empty ELLPACK page is constructed with its content * set later by the reader. */ EllpackPage(); /*! * \brief Constructor from an existing DMatrix. * * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix * in CSR format. */ explicit EllpackPage(DMatrix* dmat, const BatchParam& param); /*! \brief Destructor. */ ~EllpackPage(); EllpackPage(EllpackPage&& that); /*! \return Number of instances in the page. */ size_t Size() const; /*! \brief Set the base row id for this page. */ void SetBaseRowId(size_t row_id); const EllpackPageImpl* Impl() const { return impl_.get(); } EllpackPageImpl* Impl() { return impl_.get(); } private: std::unique_ptr<EllpackPageImpl> impl_; }; class GHistIndexMatrix; template<typename T> class BatchIteratorImpl { public: virtual ~BatchIteratorImpl() = default; virtual T& operator*() = 0; virtual const T& operator*() const = 0; virtual void operator++() = 0; virtual bool AtEnd() const = 0; }; template<typename T> class BatchIterator { public: using iterator_category = std::forward_iterator_tag; // NOLINT explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); } void operator++() { CHECK(impl_ != nullptr); ++(*impl_); } T& operator*() { CHECK(impl_ != nullptr); return *(*impl_); } const T& operator*() const { CHECK(impl_ != nullptr); return *(*impl_); } bool operator!=(const BatchIterator&) const { CHECK(impl_ != nullptr); return !impl_->AtEnd(); } bool AtEnd() const { CHECK(impl_ != nullptr); return impl_->AtEnd(); } private: std::shared_ptr<BatchIteratorImpl<T>> impl_; }; template<typename T> class BatchSet { public: explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {} BatchIterator<T> begin() { return begin_iter_; } // NOLINT BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT private: BatchIterator<T> begin_iter_; }; struct XGBAPIThreadLocalEntry; /*! * \brief Internal data structured used by XGBoost during training. */ class DMatrix { public: /*! \brief default constructor */ DMatrix() = default; /*! \brief meta information of the dataset */ virtual MetaInfo& Info() = 0; virtual void SetInfo(const char *key, const void *dptr, DataType dtype, size_t num) { this->Info().SetInfo(key, dptr, dtype, num); } virtual void SetInfo(const char* key, std::string const& interface_str) { this->Info().SetInfo(key, interface_str); } /*! \brief meta information of the dataset */ virtual const MetaInfo& Info() const = 0; /*! \brief Get thread local memory for returning data from DMatrix. */ XGBAPIThreadLocalEntry& GetThreadLocal() const; /** * \brief Gets batches. Use range based for loop over BatchSet to access individual batches. */ template<typename T> BatchSet<T> GetBatches(const BatchParam& param = {}); template <typename T> bool PageExists() const; // the following are column meta data, should be able to answer them fast. /*! \return Whether the data columns single column block. */ virtual bool SingleColBlock() const = 0; /*! \brief virtual destructor */ virtual ~DMatrix(); /*! \brief Whether the matrix is dense. */ bool IsDense() const { return Info().num_nonzero_ == Info().num_row_ * Info().num_col_; } /*! * \brief Load DMatrix from URI. * \param uri The URI of input. * \param silent Whether print information during loading. * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode. 
* \param file_format The format type of the file, used for dmlc::Parser::Create. * By default "auto" will be able to load in both local binary file. * \param page_size Page size for external memory. * \return The created DMatrix. */ static DMatrix* Load(const std::string& uri, bool silent, bool load_row_split, const std::string& file_format = "auto", size_t page_size = kPageSize); /** * \brief Creates a new DMatrix from an external data adapter. * * \tparam AdapterT Type of the adapter. * \param [in,out] adapter View onto an external data. * \param missing Values to count as missing. * \param nthread Number of threads for construction. * \param cache_prefix (Optional) The cache prefix for external memory. * \param page_size (Optional) Size of the page. * * \return a Created DMatrix. */ template <typename AdapterT> static DMatrix* Create(AdapterT* adapter, float missing, int nthread, const std::string& cache_prefix = "", size_t page_size = kPageSize); /** * \brief Create a new Quantile based DMatrix used for histogram based algorithm. * * \tparam DataIterHandle External iterator type, defined in C API. * \tparam DMatrixHandle DMatrix handle, defined in C API. * \tparam DataIterResetCallback Callback for reset, prototype defined in C API. * \tparam XGDMatrixCallbackNext Callback for next, prototype defined in C API. * * \param iter External data iterator * \param proxy A hanlde to ProxyDMatrix * \param reset Callback for reset * \param next Callback for next * \param missing Value that should be treated as missing. * \param nthread number of threads used for initialization. * \param max_bin Maximum number of bins. * * \return A created quantile based DMatrix. */ template <typename DataIterHandle, typename DMatrixHandle, typename DataIterResetCallback, typename XGDMatrixCallbackNext> static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy, DataIterResetCallback *reset, XGDMatrixCallbackNext *next, float missing, int nthread, int max_bin); virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0; /*! \brief Number of rows per page in external memory. Approximately 100MB per page for * dataset with 100 features. 
*/ static const size_t kPageSize = 32UL << 12UL; protected: virtual BatchSet<SparsePage> GetRowBatches() = 0; virtual BatchSet<CSCPage> GetColumnBatches() = 0; virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0; virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0; virtual BatchSet<GHistIndexMatrix> GetGradientIndex(const BatchParam& param) = 0; virtual bool EllpackExists() const = 0; virtual bool SparsePageExists() const = 0; }; template<> inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) { return GetRowBatches(); } template<> inline bool DMatrix::PageExists<EllpackPage>() const { return this->EllpackExists(); } template<> inline bool DMatrix::PageExists<SparsePage>() const { return this->SparsePageExists(); } template<> inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) { return GetColumnBatches(); } template<> inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) { return GetSortedColumnBatches(); } template<> inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) { return GetEllpackBatches(param); } template<> inline BatchSet<GHistIndexMatrix> DMatrix::GetBatches(const BatchParam& param) { return GetGradientIndex(param); } } // namespace xgboost namespace dmlc { DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true); namespace serializer { template <> struct Handler<xgboost::Entry> { inline static void Write(Stream* strm, const xgboost::Entry& data) { strm->Write(data.index); strm->Write(data.fvalue); } inline static bool Read(Stream* strm, xgboost::Entry* data) { return strm->Read(&data->index) && strm->Read(&data->fvalue); } }; } // namespace serializer } // namespace dmlc #endif // XGBOOST_DATA_H_
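An illustrative sketch (assuming the xgboost headers and library are on the path, and a hypothetical train.libsvm input file) of how client code typically walks a DMatrix through the batch interface declared above: GetBatches<SparsePage>() yields row batches, and GetView() exposes each row as a span of Entry.

// Sketch of iterating DMatrix row batches via the interface in this header.
#include <xgboost/data.h>
#include <cstdio>
#include <memory>

int main() {
  std::unique_ptr<xgboost::DMatrix> dmat{
      xgboost::DMatrix::Load("train.libsvm", /*silent=*/true, /*load_row_split=*/false)};
  for (auto const &page : dmat->GetBatches<xgboost::SparsePage>()) {
    auto view = page.GetView();
    for (size_t r = 0; r < view.Size(); ++r) {
      for (auto const &e : view[r]) {        // Entry { index, fvalue }
        std::printf("row %zu: feature %u = %f\n",
                    r + page.base_rowid, (unsigned)e.index, (double)e.fvalue);
      }
    }
  }
  return 0;
}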
DRB007-indirectaccess3-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two pointers have distance of 12 (p1 - p2 = 12). They are used as base addresses for indirect array accesses using an index set (another array). An index set has two indices with distance of 12 : indexSet[3]- indexSet[0] = 533 - 521 = 12 So there is loop carried dependence for N=0 and N=3. We use the default loop scheduling (static even) in OpenMP. It is possible that two dependent iterations will be scheduled within a same chunk to a same thread. So there is no runtime data races. N is 180, two iteraions with N=0 and N= 3 have loop carried dependences. For static even scheduling, we must have at least 60 threads (180/60=3 iterations) so iteration 0 and 3 will be scheduled to two different threads. Data race pair: xa1[idx]@128:5 vs. 
xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180

int indexSet[N] = {
  521, 523, 525, 533, 529, 531, // 521+12=533
  547, 549, 551, 553, 555, 557,
  573, 575, 577, 579, 581, 583,
  599, 601, 603, 605, 607, 609,
  625, 627, 629, 631, 633, 635,
  651, 653, 655, 657, 659, 661,

  859, 861, 863, 865, 867, 869,
  885, 887, 889, 891, 893, 895,
  911, 913, 915, 917, 919, 921,
  937, 939, 941, 943, 945, 947,
  963, 965, 967, 969, 971, 973,
  989, 991, 993, 995, 997, 999,

  1197, 1199, 1201, 1203, 1205, 1207,
  1223, 1225, 1227, 1229, 1231, 1233,
  1249, 1251, 1253, 1255, 1257, 1259,
  1275, 1277, 1279, 1281, 1283, 1285,
  1301, 1303, 1305, 1307, 1309, 1311,
  1327, 1329, 1331, 1333, 1335, 1337,

  1535, 1537, 1539, 1541, 1543, 1545,
  1561, 1563, 1565, 1567, 1569, 1571,
  1587, 1589, 1591, 1593, 1595, 1597,
  1613, 1615, 1617, 1619, 1621, 1623,
  1639, 1641, 1643, 1645, 1647, 1649,
  1665, 1667, 1669, 1671, 1673, 1675,

  1873, 1875, 1877, 1879, 1881, 1883,
  1899, 1901, 1903, 1905, 1907, 1909,
  1925, 1927, 1929, 1931, 1933, 1935,
  1951, 1953, 1955, 1957, 1959, 1961,
  1977, 1979, 1981, 1983, 1985, 1987,
  2003, 2005, 2007, 2009, 2011, 2013};

int main (int argc, char* argv[])
{
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0) {
    printf ("Error in malloc(). Aborting ...\n");
    return 1;
  }
  double * xa1 = base;
  double * xa2 = xa1 + 12;
  int i;

  // initialize segments touched by indexSet
#pragma omp parallel for
  for (i =521; i<= 2025; ++i) {
    base[i]=0.5*i;
  }

  // indirectly accessed updates: the documented data race pair lives here
#pragma omp parallel for
  for (i =0; i< N; ++i) {
    int idx = indexSet[i];
    xa1[idx]+= 1.0;
    xa2[idx]+= 3.0;
  }

  printf("xa1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
  free (base);
  return 0;
}
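A small sketch of the aliasing the header comment describes: with xa2 = xa1 + 12 and indexSet[3] - indexSet[0] = 533 - 521 = 12, iterations i = 0 and i = 3 of the parallel loop update the same element of base, which is what makes the loop racy.

// Demonstrates that xa1[indexSet[3]] and xa2[indexSet[0]] are the same location.
#include <cstdio>

int main() {
    double base[2026] = {0};
    double *xa1 = base;
    double *xa2 = xa1 + 12;
    const int idx0 = 521, idx3 = 533;        // indexSet[0] and indexSet[3]
    std::printf("same address? %s\n",
                (&xa1[idx3] == &xa2[idx0]) ? "yes" : "no");   // prints "yes"
    return 0;
}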
loss.h
/** * Copyright (c) 2015 by Contributors * @file loss.h * @brief the basic class of a loss function */ #ifndef DIFACTO_LOSS_H_ #define DIFACTO_LOSS_H_ #include <string> #include <vector> #include "./base.h" #include "dmlc/data.h" #include "dmlc/omp.h" #include "./sarray.h" namespace difacto { /** * \brief the basic class of a loss function */ class Loss { public: /** * \brief the factory function * \param type the loss type such as "fm" * \param num_threads number of threads */ static Loss* Create(const std::string& type, int nthreads = DEFAULT_NTHREADS); /** \brief constructor */ Loss() : nthreads_(DEFAULT_NTHREADS) { } /** \brief deconstructor */ virtual ~Loss() { } /** * \brief init the loss function * * @param kwargs keyword arguments * @return the unknown kwargs */ virtual KWArgs Init(const KWArgs& kwargs) = 0; /** * \brief predict given the data and model weights. often known as "forward" * * @param data the data * @param param model weights * @return pred the predict results */ virtual void Predict(const dmlc::RowBlock<unsigned>& data, const std::vector<SArray<char>>& param, SArray<real_t>* pred) = 0; /** * \brief evaluate the loss * * return logit loss in default * * @param label label * @param pred prediction * * @return the objective value */ virtual real_t Evaluate(dmlc::real_t const* label, const SArray<real_t>& pred) const { real_t objv = 0; #pragma omp parallel for reduction(+:objv) num_threads(nthreads_) for (size_t i = 0; i < pred.size(); ++i) { real_t y = label[i] > 0 ? 1 : -1; objv += log(1 + exp(- y * pred[i])); } return objv; } /** * \brief calculate gradient given the data and model weights. often known as "backward" * @param data the data * @param param model weights * @return grad the gradients */ virtual void CalcGrad(const dmlc::RowBlock<unsigned>& data, const std::vector<SArray<char>>& param, SArray<real_t>* grad) = 0; /** * \brief set the number of threads */ void set_nthreads(int nthreads) { CHECK_GT(nthreads, 1); CHECK_LT(nthreads, 50); nthreads_ = nthreads; } int nthreads_; }; } // namespace difacto #endif // DIFACTO_LOSS_H_
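// A small standalone illustration (independent of the difacto/dmlc headers
// above) of the objective that the default Loss::Evaluate computes: labels are
// mapped to y in {-1, +1} and each example contributes log(1 + exp(-y * pred)).
// The labels and scores below are made up purely for the example.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> label = {1.0f, 0.0f, 1.0f};   // 0 is treated as the negative class
  std::vector<float> pred  = {2.0f, -1.5f, 0.0f};  // raw scores, not probabilities
  double objv = 0;
  for (size_t i = 0; i < pred.size(); ++i) {
    double y = label[i] > 0 ? 1 : -1;
    objv += std::log(1 + std::exp(-y * pred[i]));
  }
  std::printf("logit loss = %f\n", objv);  // ~0.127 + ~0.201 + ~0.693
  return 0;
}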
packet-inl.h
/*! * Copyright (c) 2014 by Contributors * \file packet-inl.h * \brief Generic packet vectorization code */ #ifndef MSHADOW_PACKET_INL_H_ #define MSHADOW_PACKET_INL_H_ #if defined(__APPLE__) || defined(__FreeBSD__) #include <stdlib.h> #else #include <malloc.h> #endif #include "./base.h" #include "./tensor.h" #include "./expression.h" namespace mshadow { /*! \brief namespace of packet math*/ namespace packet { enum PacketArch { kPlain, kSSE2, }; #if MSHADOW_USE_SSE #define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kSSE2 #else #define MSHADOW_DEFAULT_PACKET ::mshadow::packet::kPlain #endif // whether packet operator is enabled. /*! * \brief Generic packet type * \tparam DType The data type of the packet. * \tparam Arch the Arch of the packet. */ template<typename DType, PacketArch Arch = MSHADOW_DEFAULT_PACKET> struct Packet; template<PacketArch Arch> struct AlignBytes { static const index_t value = 4; }; } // namespace packet } // namespace mshadow namespace mshadow { namespace packet { /*! * \brief analog to cudaMallocPitch, allocate a aligned space with num_line * lspace cells * \param out_pitch output parameter, the actuall space allocated for each line * \param lspace number of cells required for each line * \param num_line number of lines to be allocated */ inline void* AlignedMallocPitch(size_t *out_pitch, size_t lspace, size_t num_line) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t mask = (1 << bits) - 1; size_t pitch = ((lspace + mask) >> bits) << bits; *out_pitch = pitch; #ifdef _MSC_VER void *res = _aligned_malloc(pitch * num_line, 1 << bits); #else void *res; int ret = posix_memalign(&res, 1 << bits, pitch * num_line); CHECK_EQ(ret, 0) << "AlignedMallocPitch failed"; #endif if (res == NULL) { LOG(FATAL) << "AlignedMallocPitch failed"; } return res; } /*! * \brief free aligned space * \param ptr pointer to space to be freed */ inline void AlignedFree(void *ptr) { #ifdef _MSC_VER _aligned_free(ptr); #else free(ptr); #endif } /*! \brief check if a pointer is aligned */ template<PacketArch Arch> inline bool CheckAlign(size_t pitch) { const index_t bits = AlignBytes<Arch>::value; return !(pitch & ((1 << bits) - 1)); } /*! \brief check if a pointer is aligned */ template<PacketArch Arch> inline bool CheckAlign(void *ptr) { return CheckAlign<Arch>(reinterpret_cast<size_t>(ptr)); } /*! * \brief get upper bound of aligned index of size * \param size size of the array * \param fsize size of float */ template<typename DType, PacketArch Arch> inline index_t UpperAlign(index_t size) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t mask = (1 << bits) - 1; const index_t fsize = sizeof(DType); return (((size * fsize + mask) >> bits) << bits) / fsize; } /*! * \brief get lower bound of aligned index of size * \param size size of the array * \param fsize size of float */ template<typename DType, PacketArch Arch> inline index_t LowerAlign(index_t size) { const index_t bits = AlignBytes<MSHADOW_DEFAULT_PACKET>::value; const index_t fsize = sizeof(DType); return (((size * fsize) >> bits) << bits) / fsize; } /*! * \brief generic Packet operator * \tparam OP The operator * \tparam DType The data type * \tparam Arch The architecture. 
*/ template<typename OP, typename DType, PacketArch Arch> struct PacketOp { static const bool kEnabled = false; }; // specialization of operators template<typename DType, PacketArch Arch> struct PacketOp<op::plus, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs + rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::minus, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs - rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::mul, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs * rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::div, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& lhs, const Packet<DType, Arch>& rhs) { return lhs / rhs; } }; template<typename DType, PacketArch Arch> struct PacketOp<op::identity, DType, Arch> { static const bool kEnabled = true; MSHADOW_CINLINE static Packet<DType, Arch> Map(const Packet<DType, Arch>& src) { return src; } }; // savers to do storage template<typename SV, typename TFloat, PacketArch Arch> struct Saver{ MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) { Packet<TFloat, Arch> lhs = Packet<TFloat, Arch>::Load(dst); Packet<TFloat, Arch> ans = PacketOp<typename SV::OPType, TFloat, Arch>::Map(lhs, src); ans.Store(dst); } }; template<typename TFloat, PacketArch Arch> struct Saver<sv::saveto, TFloat, Arch> { MSHADOW_CINLINE static void Save(TFloat *dst, const Packet<TFloat, Arch>& src) { src.Store(dst); } }; } // namespace packet } // namespace mshadow #include "packet/plain-inl.h" #if MSHADOW_USE_SSE && !defined(__CUDACC__) #include "packet/sse-inl.h" #endif namespace mshadow { namespace expr { typedef packet::PacketArch PacketArch; // same as plan, but use packet template<typename ExpType, typename DType, PacketArch Arch> class PacketPlan { public: /*! 
* \brief evaluate the expression at index [y][x], * x will be aligned to Packet<DType, Arch>::Size() */ MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const; MSHADOW_CINLINE DType Eval(index_t y, index_t x) const; }; template <typename Device, int dim, typename DType, PacketArch Arch> class PacketPlan<Tensor<Device, dim, DType>, DType, Arch> { public: explicit PacketPlan(const Tensor<Device, dim, DType> &t) :dptr_(t.dptr_), stride_(t.stride_) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::Packet<DType, Arch>::Load(&dptr_[y * stride_ + x]); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return dptr_[y * stride_ + x]; } private: const DType *dptr_; index_t stride_; }; template<typename DType, PacketArch Arch> class PacketPlan<ScalarExp<DType>, DType, Arch> { public: explicit PacketPlan(DType scalar) : scalar_(scalar) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::Packet<DType, Arch>::Fill(scalar_); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return scalar_; } private: DType scalar_; }; template<typename OP, typename TA, typename TB, int etype, typename DType, PacketArch Arch> class PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> { public: PacketPlan(const PacketPlan<TA, DType, Arch> &lhs, const PacketPlan<TB, DType, Arch> &rhs) : lhs_(lhs), rhs_(rhs) {} MSHADOW_CINLINE packet::Packet<DType, Arch> EvalPacket(index_t y, index_t x) const { return packet::PacketOp<OP, DType, Arch>::Map(lhs_.EvalPacket(y, x), rhs_.EvalPacket(y, x)); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return OP::Map(lhs_.Eval(y, x), rhs_.Eval(y, x)); } private: PacketPlan<TA, DType, Arch> lhs_; PacketPlan<TB, DType, Arch> rhs_; }; template<typename OP, typename TA, int etype, typename DType, PacketArch Arch> class PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch> { public: PacketPlan(const PacketPlan<TA, DType, Arch> &src) : src_(src) {} MSHADOW_CINLINE packet::Packet<DType> EvalPacket(index_t y, index_t x) const { return packet::PacketOp<OP, DType, Arch>::Map(src_.EvalPacket(y, x)); } MSHADOW_CINLINE DType Eval(index_t y, index_t x) const { return OP::Map(src_.Eval(y, x)); } private: PacketPlan<TA, DType, Arch> src_; }; template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype> inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e); template<PacketArch Arch, typename DType> inline PacketPlan<ScalarExp<DType>, DType, Arch> MakePacketPlan(const ScalarExp<DType> &e) { return PacketPlan<ScalarExp<DType>, DType, Arch>(e.scalar_); } template<PacketArch Arch, typename T, typename DType> inline PacketPlan<T, DType, Arch> MakePacketPlan(const RValueExp<T, DType> &e) { return PacketPlan<T, DType, Arch>(e.self()); } template<PacketArch Arch, typename T, int dim, typename DType> inline PacketPlan<T, DType, Arch> MakePacketPlan(const MakeTensorExp<T, cpu, dim, DType> &e) { return PacketPlan<T, DType, Arch>(e.real_self()); } template<PacketArch Arch, typename OP, typename TA, typename DType, int etype> inline PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch> MakePacketPlan(const UnaryMapExp<OP, TA, DType, etype> &e) { return PacketPlan<UnaryMapExp<OP, TA, DType, etype>, DType, Arch>(MakePacketPlan<Arch>(e.src_)); } template<PacketArch Arch, typename OP, typename TA, typename TB, typename DType, int etype> 
inline PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch> MakePacketPlan(const BinaryMapExp<OP, TA, TB, DType, etype> &e) { return PacketPlan<BinaryMapExp<OP, TA, TB, DType, etype>, DType, Arch>(MakePacketPlan<Arch>(e.lhs_), MakePacketPlan<Arch>(e.rhs_)); } /*! * \brief static check packet enable * * \tparam Device the type of Device * \tparam dim dimension of the tensor * \tparam E expression */ template<typename E, PacketArch Arch> struct PacketCheck{ static const bool kPass = false; }; template<PacketArch Arch> struct PacketCheck<float, Arch> { static const bool kPass = true; }; template<PacketArch Arch> struct PacketCheck<double, Arch> { static const bool kPass = true; }; template<typename DType, PacketArch Arch> struct PacketCheck<ScalarExp<DType>, Arch> { static const bool kPass = PacketCheck<DType, Arch>::kPass; }; template<int dim, typename DType, PacketArch Arch> struct PacketCheck<Tensor<cpu, dim, DType>, Arch> { static const bool kPass = PacketCheck<DType, Arch>::kPass; }; template<typename OP, typename TA, typename DType, int etype, PacketArch Arch> struct PacketCheck<UnaryMapExp<OP, TA, DType, etype>, Arch> { static const bool kPass = PacketCheck<TA, Arch>::kPass && packet::PacketOp<OP, DType, Arch>::kEnabled; }; template<typename OP, typename TA, typename TB, typename DType, int etype, PacketArch Arch> struct PacketCheck< BinaryMapExp<OP, TA, TB, DType, etype>, Arch> { static const bool kPass = packet::PacketOp<OP, DType, Arch>::kEnabled && PacketCheck<TA, Arch>::kPass && PacketCheck<TB, Arch>::kPass; }; //---------------------------------------------------- // Check if data is aligned and allow packet operation //---------------------------------------------------- template<int dim, typename E, PacketArch Arch> struct PacketAlignCheck { inline static bool Check(const E &exp) { return false; } }; template<int dim, typename DType, PacketArch Arch> struct PacketAlignCheck<dim, ScalarExp<DType>, Arch> { inline static bool Check(const ScalarExp<DType> &exp) { return true; } }; template<int dim, typename DType, PacketArch Arch> struct PacketAlignCheck<dim, Tensor<cpu, dim, DType>, Arch> { inline static bool Check(const Tensor<cpu, dim, DType> &t) { return packet::CheckAlign<Arch>(t.dptr_) && packet::CheckAlign<Arch>(t.stride_ * sizeof(DType)); } }; template<int dim, typename OP, typename TA, typename DType, int etype, PacketArch Arch> struct PacketAlignCheck<dim, UnaryMapExp<OP, TA, DType, etype>, Arch> { inline static bool Check(const UnaryMapExp<OP, TA, DType, etype> &t) { return PacketAlignCheck<dim, TA, Arch>::Check(t.src_); } }; template<int dim, typename OP, typename TA, typename TB, typename DType, int etype, PacketArch Arch> struct PacketAlignCheck<dim, BinaryMapExp<OP, TA, TB, DType, etype>, Arch> { inline static bool Check(const BinaryMapExp<OP, TA, TB, DType, etype> &t) { return PacketAlignCheck<dim, TA, Arch>::Check(t.lhs_) && PacketAlignCheck<dim, TB, Arch>::Check(t.rhs_); } }; /*! 
* \brief use PacketPlan to compute result */ template<typename SV, typename E, int dim, typename DType, PacketArch Arch> inline void MapPacketPlan(Tensor<cpu, dim, DType> _dst, const expr::PacketPlan<E, DType, Arch>& plan) { Tensor<cpu, 2, DType> dst = _dst.FlatTo2D(); const index_t xlen = packet::LowerAlign<DType, Arch>(dst.size(1)); const size_t packetSize = packet::Packet<DType, Arch>::size; #ifndef __CUDACC__ #pragma omp parallel for #endif for (openmp_index_t y = 0; y < dst.size(0); ++y) { for (index_t x = 0; x < xlen; x += packetSize) { packet::Saver<SV, DType, Arch>::Save(&dst[y][x], plan.EvalPacket(y, x)); } for (index_t x = xlen; x < dst.size(1); ++x) { SV::Save(dst[y][x], plan.Eval(y, x)); } } } } // namespace expr } // namespace mshadow #endif // MSHADOW_PACKET_INL_H_
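// A standalone sketch of the alignment arithmetic used by LowerAlign/UpperAlign
// above (it mirrors the bit manipulation rather than including mshadow). With
// AlignBytes::value = 4 the packet width is 1 << 4 = 16 bytes, i.e. 4 floats:
// LowerAlign rounds an element count down to a whole number of packets and
// UpperAlign rounds it up.
#include <cstdio>

int main() {
  const unsigned bits = 4, mask = (1u << bits) - 1;
  const unsigned fsize = sizeof(float);  // 4 bytes per element
  const unsigned sizes[] = {7u, 8u, 9u};
  for (unsigned size : sizes) {
    unsigned lower = (((size * fsize) >> bits) << bits) / fsize;         // round down
    unsigned upper = (((size * fsize + mask) >> bits) << bits) / fsize;  // round up
    std::printf("size=%u -> lower=%u upper=%u\n", size, lower, upper);   // 7 -> 4/8, 8 -> 8/8, 9 -> 8/12
  }
  return 0;
}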
quantize.h
// Copyright 2018 The MACE Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_UTILS_QUANTIZE_H_ #define MACE_UTILS_QUANTIZE_H_ #include <algorithm> #include <cmath> #include <limits> namespace mace { template<typename T> inline void AdjustRange(const float in_min_data, const float in_max_data, const bool non_zero, float *scale, int32_t *zero_point) { // re-range to make range include zero float and // make zero float as integer u8 const T quantized_min = std::numeric_limits<T>::lowest(); const T quantized_max = std::numeric_limits<T>::max(); if (quantized_min < 0) { MACE_ASSERT(!non_zero, "Cannot nudge to non_zero quantize value."); } float out_max = std::max(0.f, in_max_data); float out_min = std::min(0.f, in_min_data); // make in_min_data quantize as greater than 1 if (non_zero) { out_min = std::min(out_min, in_min_data - (out_max - in_min_data) / (quantized_max - quantized_min - 1)); } *scale = (out_max - out_min) / (quantized_max - quantized_min); const float kEps = 1e-6; if (out_min < -kEps && out_max > kEps) { float quantized_zero = -out_min / *scale; int32_t quantized_zero_near_int = static_cast<int32_t>(roundf(quantized_zero)); *zero_point = quantized_zero_near_int; if (fabs(quantized_zero - quantized_zero_near_int) > kEps) { if (quantized_zero < quantized_zero_near_int || non_zero) { // keep out_max fixed, and move out_min *zero_point = static_cast<int32_t>(std::ceil(quantized_zero)); *scale = out_max / (quantized_max - *zero_point); } else { // keep out_min fixed, and move out_max *scale = out_min / (quantized_min - *zero_point); } } } else if (out_min > -kEps) { *zero_point = quantized_min; } else { *zero_point = quantized_max; } } template<typename T> inline T Saturate(float value) { int rounded_value = static_cast<int>(value); if (rounded_value <= std::numeric_limits<T>::lowest()) { return std::numeric_limits<T>::lowest(); } else if (rounded_value >= std::numeric_limits<T>::max()) { return std::numeric_limits<T>::max(); } else { return static_cast<T>(rounded_value); } } inline void FindMinMax(const float *input, const index_t size, float *min_val, float *max_val) { float max_v = std::numeric_limits<float>::lowest(); float min_v = std::numeric_limits<float>::max(); for (index_t i = 0; i < size; ++i) { max_v = std::max(max_v, input[i]); min_v = std::min(min_v, input[i]); } *min_val = min_v; *max_val = max_v; } template<typename T> inline void QuantizeWithScaleAndZeropoint(const float *input, const index_t size, float scale, int32_t zero_point, T *output) { float recip_scale = 1 / scale; #pragma omp parallel for schedule(runtime) for (int i = 0; i < size; ++i) { output[i] = Saturate<T>(roundf(zero_point + recip_scale * input[i])); } } template<typename T> inline void Quantize(const float *input, const index_t size, bool non_zero, T *output, float *scale, int32_t *zero_point) { float in_min_data; float in_max_data; FindMinMax(input, size, &in_min_data, &in_max_data); AdjustRange<T>(in_min_data, in_max_data, non_zero, scale, zero_point); 
QuantizeWithScaleAndZeropoint(input, size, *scale, *zero_point, output); } template<typename T> inline void Dequantize(const T *input, const index_t size, const float scale, const int32_t zero_point, float *output) { #pragma omp parallel for schedule(runtime) for (int i = 0; i < size; ++i) { output[i] = scale * (input[i] - zero_point); } } inline void QuantizeMultiplier(double multiplier, int32_t* output_multiplier, int32_t* shift) { if (multiplier == 0.f) { *output_multiplier = 0; *shift = 0; return; } const double q = std::frexp(multiplier, shift); auto qint = static_cast<int64_t>(roundl(q * (1ll << 31))); if (qint == (1ll << 31)) { qint /= 2; ++*shift; } *output_multiplier = static_cast<int32_t>(qint); MACE_CHECK(*output_multiplier <= std::numeric_limits<int32_t>::max()); } inline void GetOutputMultiplierAndShift( const float lhs_scale, const float rhs_scale, const float output_scale, int32_t *quantized_multiplier, int *right_shift) { float real_multiplier = lhs_scale * rhs_scale / output_scale; MACE_CHECK(real_multiplier > 0.f && real_multiplier < 1.f, real_multiplier); int exponent; QuantizeMultiplier(real_multiplier, quantized_multiplier, &exponent); *right_shift = -exponent; MACE_CHECK(*right_shift >= 0); } } // namespace mace #endif // MACE_UTILS_QUANTIZE_H_
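// A standalone sketch (separate from the MACE helpers above) of the affine
// quantization scheme they implement: real = scale * (quantized - zero_point),
// with the zero point chosen so that 0.0f maps exactly onto an integer value.
// AdjustRange additionally nudges the range when the zero point is fractional;
// this example only shows the basic uint8 round trip for a made-up range.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const float in_min = -1.0f, in_max = 3.0f;        // observed float range (contains 0)
  const float qmin = 0.0f, qmax = 255.0f;           // uint8 target range
  float scale = (in_max - in_min) / (qmax - qmin);  // ~0.01569
  int32_t zero_point = static_cast<int32_t>(std::round(-in_min / scale));  // ~64

  const float x = 1.0f;
  int32_t q = zero_point + static_cast<int32_t>(std::round(x / scale));
  q = std::min(255, std::max(0, q));                // saturate to the uint8 range
  float back = scale * (q - zero_point);            // dequantize
  std::printf("x=%.3f -> q=%d -> %.4f (scale=%.5f, zero_point=%d)\n",
              x, q, back, scale, zero_point);
  return 0;
}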
GB_unop__abs_fp32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__abs_fp32_fc32 // op(A') function: GB_unop_tran__abs_fp32_fc32 // C type: float // A type: GxB_FC32_t // cast: GxB_FC32_t cij = (aij) // unaryop: cij = cabsf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cabsf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = (aij) ; \ Cx [pC] = cabsf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__abs_fp32_fc32 ( float *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cabsf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = (aij) ; Cx [p] = cabsf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__abs_fp32_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
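/*
 * A standalone sketch of the scalar operation the generated kernel above
 * applies elementwise: GxB_FC32_t is GraphBLAS's single-precision complex
 * type, and cij = cabsf (aij) stores the magnitude sqrt(re*re + im*im) of
 * each entry into a float output.
 */
#include <complex.h>
#include <stdio.h>

int main (void)
{
    float complex aij = 3.0f + 4.0f * I ;
    float cij = cabsf (aij) ;           /* |3 + 4i| = 5 */
    printf ("cabsf(3+4i) = %g\n", cij) ;
    return (0) ;
}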
axpy.c
// Experimental test input for Accelerator directives // simplest scalar*vector operations // Liao 1/15/2013 #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <sys/timeb.h> #define NUM_RUNS 10 double read_timer_ms() { struct timeb tm; ftime(&tm); return (double) tm.time * 1000.0 + (double) tm.millitm; } /* change this to do saxpy or daxpy : single precision or double precision*/ #define REAL double #define VEC_LEN 1024000 //use a fixed number for now /* zero out the entire vector */ void zero(REAL *A, int n) { int i; for (i = 0; i < n; i++) { A[i] = 0.0; } } /* initialize a vector with random floating point numbers */ void init(REAL *A, int n) { int i; for (i = 0; i < n; i++) { A[i] = (double)drand48(); } } /*serial version */ void axpy(REAL* x, REAL* y, long n, REAL a) { int i; for (i = 0; i < n; i++) { y[i] += a * x[i]; } } /* compare two arrays and return percentage of difference */ REAL check(REAL*A, REAL*B, int n) { int i; REAL diffsum =0.0, sum = 0.0; for (i = 0; i < n; i++) { diffsum += fabs(A[i] - B[i]); sum += fabs(B[i]); } return diffsum/sum; } void axpy_ompacc(REAL* x, REAL* y, int n, REAL a) { int i; /* //implementation of the following omp target region #pragma omp target teams distribute parallel for device (0) map(tofrom: y[0:n]) map(to: x[0:n],a,n) shared(x, y, n, a) private(i) for (i = 0; i < n; ++i) y[i] += a * x[i]; */ #pragma omp target map(tofrom: y[0:n]) map(to: x[0:n],a,n) #pragma omp parallel for shared(x, y, n, a) private(i) //#pragma omp target teams distribute parallel for map(tofrom: y[0:n]) map(to: x[0:n],a,n) shared(x, y, n, a) private(i) num_teams(256) num_threads(1024) for (i = 0; i < n; i++) y[i] += a * x[i]; } int main(int argc, char *argv[]) { int n; REAL *y_ompacc, *y, *x; REAL a = 123.456; n = 1 << 23; // 2^23, 8 million fprintf(stderr, "Usage: axpy <n>, where the problem size is 2^n.\n"); if (argc >= 2) { n = 1 << atoi(argv[1]); } y_ompacc = (REAL *) malloc(n * sizeof(REAL)); y = (REAL *) malloc(n * sizeof(REAL)); x = (REAL *) malloc(n * sizeof(REAL)); srand48(1<<12); init(x, n); init(y_ompacc, n); memcpy(y, y_ompacc, n*sizeof(REAL)); axpy(x, y, n, a); int i; double elapsed = read_timer_ms(); for (i = 0; i < NUM_RUNS; i++) axpy_ompacc(x, y, n, a); elapsed = (read_timer_ms() - elapsed)/NUM_RUNS; REAL checkresult = check(y_ompacc, y, n); fprintf(stderr, "axpy(%d): checksum: %g, time: %0.2fms\n", n, checkresult, elapsed); //assert (checkresult < 1.0e-10); printf("%g", elapsed); free(y_ompacc); free(y); free(x); return 0; }
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; 
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. 
struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. 
struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This represents the stack of attributes that were pushed by /// \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. 
std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). 
QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. 
struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. 
class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). 
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
    ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe
    // no-op in that case.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string
  getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. 
/// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
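  ///
  /// Added note (illustrative only): most callers never subclass this
  /// directly; they use the variadic RequireCompleteType(Loc, T, DiagID,
  /// Args...) overloads declared further below, which bind the arguments
  /// through BoundTypeDiagnoser. A hand-written diagnoser would look roughly
  /// like the sketch below, where err_example_incomplete stands in for a real
  /// diagnostic ID and is not defined anywhere:
  /// \code
  ///   struct ExampleDiagnoser : Sema::TypeDiagnoser {
  ///     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
  ///       S.Diag(Loc, diag::err_example_incomplete) << T;
  ///     }
  ///   };
  ///   ExampleDiagnoser Diagnoser;
  ///   SemaRef.RequireCompleteType(Loc, T, Diagnoser);
  /// \endcode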
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { clang::Module *Module = nullptr; bool ModuleInterface = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. 
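  ///
  /// Illustrative call pattern (added for exposition; Loc and D are
  /// caller-provided values, and the recovery step shown is only one
  /// possible response):
  /// \code
  ///   NamedDecl *Suggested = nullptr;
  ///   if (!hasVisibleDefinition(D, &Suggested) && Suggested)
  ///     diagnoseMissingImport(Loc, Suggested, MissingImportKind::Definition);
  /// \endcode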
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. 
/// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. 
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); // TVM local begin const CXXRecordDecl *FindDecomposableBaseClass(SourceLocation Loc, const CXXRecordDecl *RD, CXXCastPath &BasePath); // TVM local end void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, 
SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. 
  Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                              SourceLocation SemiLoc);

  enum class ModuleDeclKind {
    Interface,      ///< 'export module X;'
    Implementation, ///< 'module X;'
    Partition,      ///< 'module partition X;'
  };

  /// The parser has processed a module-declaration that begins the definition
  /// of a module interface or implementation.
  DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                                 SourceLocation ModuleLoc, ModuleDeclKind MDK,
                                 ModuleIdPath Path);

  /// The parser has processed a module import declaration.
  ///
  /// \param AtLoc The location of the '@' symbol, if any.
  ///
  /// \param ImportLoc The location of the 'import' keyword.
  ///
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                               ModuleIdPath Path);

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             MissingImportKind MIK, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                             SourceLocation LBraceLoc);
  Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                              SourceLocation RBraceLoc);

  /// We've found a use of a templated declaration that would trigger an
  /// implicit instantiation. Check that any relevant explicit specializations
  /// and partial specializations are visible, and diagnose if not.
  void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

  /// We've found a use of a template specialization that would select a
  /// partial specialization. Check that the partial specialization is visible,
  /// and diagnose if not.
  void checkPartialSpecializationVisibility(SourceLocation Loc,
                                            NamedDecl *Spec);

  /// Retrieve a suitable printing policy for diagnostics.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// Retrieve a suitable printing policy for diagnostics.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);

  /// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". 
TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. 
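  ///
  /// PushDeclContext() and PopDeclContext() below must stay balanced; an
  /// illustrative sequence while processing a class body (RD is a
  /// hypothetical CXXRecordDecl* being defined):
  /// \code
  ///   PushDeclContext(S, RD);
  ///   // ... act on the member declarations of RD ...
  ///   PopDeclContext();
  /// \endcode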
void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Attribute merging methods. Return true if a new attribute was added. 
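  ///
  /// These are typically driven from mergeDeclAttributes() (declared further
  /// below) when a redeclaration is seen. A sketch of the usual shape, with
  /// OldAttr and New as hypothetical caller-side variables:
  /// \code
  ///   if (SectionAttr *Merged =
  ///           mergeSectionAttr(New, OldAttr->getRange(), OldAttr->getName(),
  ///                            OldAttr->getSpellingListIndex()))
  ///     New->addAttr(Merged);
  /// \endcode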
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); /// Checks availability of the function depending on the current /// function context.Inside an unavailable function,unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. 
enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf ///< Condition in a constexpr if statement. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. 
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = 
SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. /// /// \param AllowTopLevelCond Whether to allow the result to be the /// complete top-level condition. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
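///
/// A minimal, hypothetical use ('Fn' and 'Loc' are illustrative names, and the
/// surrounding caller is assumed to return an ExprResult):
///   if (!checkAddressOfFunctionIsAvailable(Fn, /*Complain=*/true, Loc))
///     return ExprError();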
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. 
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. 
bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive, bool allowArrayTypes); /// Stmt attributes - this routine is the top level dispatcher. 
StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter and /// the property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks whether two methods' types match and returns /// true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a /// category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, it /// returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ?
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg(ActOnFinishFullExpr(Arg, CC).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt 
*Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
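/// For example, in C++:
///   void take(int *p);
///   take(0);   // warned about (e.g. under -Wzero-as-null-pointer-constant);
///              // passing nullptr instead avoids the warning.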
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
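// (For instance, a pure virtual function named without qualification, as in
// 'x.f()', is not necessarily odr-used, whereas the qualified call
// 'x.Base::f()' does odr-use it.)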
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
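/// For example, given 'void f();', the bare expression 'f' can be called with
/// zero arguments yielding 'void'; recovery code (such as tryToRecoverWithCall
/// above) can use this to suggest adding the missing '()'.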
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> 
Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
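// For example (an illustrative sketch; the identifiers are hypothetical):
//
//   #include <memory>
//   struct Widget { int x; };
//   int field(const std::unique_ptr<Widget> &p) {
//     return p.x;   // intentionally ill-formed: no member 'x' in unique_ptr;
//                   // retrying the access as 'p->x' is the repair these
//                   // extra arguments make possible.
//   }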
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; public: /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
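/// For example (an illustrative sketch; nothing below is from this header):
/// \code
///   #include <initializer_list>
///   #include <vector>
///   std::vector<int> v{1, 2, 3};   // initializer-list constructor
///   auto il = {4, 5, 6};           // deduces std::initializer_list<int>
/// \endcode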
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor.
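/// A minimal sketch of the language construct involved (illustrative only):
/// \code
///   struct Base { Base(int) {} };
///   struct Derived : Base {
///     using Base::Base;   // inheriting constructor
///   };
///   Derived d(42);        // constructed through the inherited Base(int)
/// \endcode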
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. 
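/// For instance (an illustrative sketch; the types are hypothetical):
/// \code
///   #include <type_traits>
///   struct A { ~A() noexcept(false) {} };
///   struct B { A a; };   // the implicitly-declared ~B() collects ~A()'s
///                        // specification and becomes noexcept(false)
///   static_assert(!std::is_nothrow_destructible<B>::value, "");
/// \endcode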
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared.
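/// A hedged sketch of the situation this handles (identifiers hypothetical):
/// \code
///   struct S {
///     void f() noexcept(noexcept(g()));   // the specification is parsed once
///     static void g() noexcept;           // the class is complete, so the
///   };                                    // later declaration of 'g' is
///                                         // visible inside it
/// \endcode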
void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. 
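/// For example (an illustrative sketch; the type is hypothetical):
/// \code
///   struct Point { int x, y; };        // no user-declared operator=
///   void assign(Point &a, const Point &b) {
///     a = b;   // first use: the implicitly-declared copy assignment operator
///   }          // is implicitly defined here as a memberwise copy
/// \endcode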
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
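/// For example (an illustrative sketch; the hierarchy is hypothetical):
/// \code
///   #include <typeinfo>
///   #include <cstdio>
///   struct Base { virtual ~Base() = default; };
///   struct Derived : Base {};
///   void dump(const Base &b) {
///     std::puts(typeid(b).name());        // expression operand: dynamic type
///     std::puts(typeid(Derived).name());  // type operand
///   }
/// \endcode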
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. 
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
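/// A minimal sketch of the lookup being performed (identifiers hypothetical):
/// \code
///   #include <cstddef>
///   #include <new>
///   struct Pooled {
///     static void *operator new(std::size_t n) { return ::operator new(n); }
///     static void operator delete(void *p) noexcept { ::operator delete(p); }
///   };
///   void roundTrip() {
///     Pooled *p = new Pooled;   // class-scope operator new is found before
///     delete p;                 // the global one; delete is matched likewise
///   }
/// \endcode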
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
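/// For instance (an illustrative sketch, not from this header):
/// \code
///   #include <string>
///   bool nonEmpty() {
///     return !std::string("tmp").empty();  // the temporary's destructor runs
///   }                                      // at the end of this
///                                          // full-expression, i.e. a cleanup
/// \endcode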
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, bool IsConstexprSpecified); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. 
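/// For example (an illustrative C++14 sketch; identifiers are hypothetical):
/// \code
///   int sum() {
///     int x = 1, y = 2;
///     auto f = [x, &y, z = x + y](int a) { return x + y + z + a; };
///     return f(0);   // by-value, by-reference, and init-capture, all explicit
///   }
/// \endcode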
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, 
IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect.
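/// For example (an illustrative sketch; the type is hypothetical):
/// \code
///   struct [[clang::trivial_abi]] Handle {
///     Handle(int fd) : fd(fd) {}
///     ~Handle();        // non-trivial destructor, yet the attribute asks for
///     int fd;           // the object to be passed and returned in registers
///   };
///   void consume(Handle h);
/// \endcode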
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, 
AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation()); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
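/// For illustration (names are invented), the identifier "MyContainer"
/// introducing the last declaration below is such a deduction-guide name:
/// \code
///   template<typename T> struct MyContainer { MyContainer(T, T); };
///   template<typename Iter>
///   MyContainer(Iter, Iter) -> MyContainer<typename Iter::value_type>;
/// \endcode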
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
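/// For illustration, a rough sketch (with invented names) of how different
/// declarations put their template parameter lists in different contexts,
/// e.g. a class template versus a member template inside it:
/// \code
///   template<typename T>              // checked as a class template
///   struct A {
///     template<typename U> void m(U); // checked as a class template member
///   };
/// \endcode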
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
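/// For illustration (names are invented), such a specifier can appear in
/// dependent code like:
/// \code
///   template<typename MetaFn, typename T>
///   void use() {
///     class MetaFn::template apply<T> Obj; // elaborated-type-specifier with a template-id
///   }
/// \endcode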
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation 
TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. 
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. 
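/// For illustration (names are invented): inside the body below, "args..."
/// is a valid expansion, while a bare use of the pack would be diagnosed as
/// an unexpanded parameter pack.
/// \code
///   template<typename ...Us> void g(Us ...);
///   template<typename ...Ts> void f(Ts ...args) {
///     g(args...); // OK: the pack is expanded
///     // Ts x;    // ill-formed: 'Ts' is unexpanded here
///   }
/// \endcode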
bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. 
/// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. 
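/// For illustration: in the (invented) declaration below, "Ts &&..." is a
/// pack expansion whose pattern is "Ts &&".
/// \code
///   template<typename ...Ts> void h(Ts &&...params);
/// \endcode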
QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. 
/// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). 
TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call. struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source 
location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. 
sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. 
Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. 
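/// For illustration (names are invented): choosing the partial
/// specialization below for X<int*> deduces T = int against its arguments.
/// \code
///   template<typename T> struct X {};
///   template<typename T> struct X<T*> {};
///   X<int*> xp; // uses the partial specialization, with T deduced as int
/// \endcode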
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. 
bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. 
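/// A minimal usage sketch; \c EPI (a FunctionProtoType::ExtProtoInfo) and
/// \c NumParams are assumed to already exist in the caller:
/// \code
///   ExtParameterInfoBuilder ExtParamInfos;
///   ExtParamInfos.set(0, FunctionProtoType::ExtParameterInfo().withIsConsumed(true));
///   EPI.ExtParameterInfos = ExtParamInfos.getPointerOrNull(NumParams);
/// \endcode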
class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
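/// A sketch of a typical call (all names are placeholders for values the
/// caller already has in hand):
/// \code
///   SmallVector<Expr *, 4> ConvertedArgs;
///   if (SemaRef.SubstExprs(ArgExprs, /*IsCall=*/true, TemplateArgs, ConvertedArgs))
///     return ExprError();
/// \endcode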
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl 
*ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. 
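/// For example, for a source type such as 'NSArray<NSString *> *' the type
/// arguments ('NSString *') and any protocol qualifiers are passed in here
/// together with the locations of their enclosing angle brackets.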
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, 
ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). 
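/// For example (MSVC compatibility):
/// \code
///   #pragma init_seg(lib)
/// \endcode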
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); /// Called on well-formed '\#pragma clang attribute push'. void ActOnPragmaAttributePush(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. 
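/// For example:
/// \code
///   #pragma clang optimize off
///   void f() {}  // f is not optimized, regardless of the -O level
///   #pragma clang optimize on
/// \endcode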
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); void AddNSConsumedAttr(SourceRange AttrRange, Decl *D, unsigned SpellingListIndex, bool isNSConsumed, bool isTemplateInstantiation); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. 
// private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Set to true inside '#pragma omp declare target' region. bool IsInOpenMPDeclareTargetContext = false; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. 
/// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D) const; ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. 
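/// For reference, these callbacks are driven by a construct of the form below
/// (\c T and \c foo are placeholders):
/// \code
///   #pragma omp declare reduction(foo : T : omp_out += omp_in) initializer(omp_priv = T())
/// \endcode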
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragme omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return IsInOpenMPDeclareTargetContext; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return true if (un)supported features for the current target should be /// diagnosed if OpenMP (offloading) is enabled. bool shouldDiagnoseTargetSupportFromOpenMP() const { return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() || isInOpenMPTargetExecutionDirective(); } /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. 
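/// For example:
/// \code
///   #pragma omp for simd
///   for (int I = 0; I < N; ++I)
///     A[I] += B[I];
/// \endcode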
StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. 
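/// For example:
/// \code
///   #pragma omp atomic update
///   X += 1;
/// \endcode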
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
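/// For example (inside an enclosing 'teams' region):
/// \code
///   #pragma omp distribute parallel for
///   for (int I = 0; I < N; ++I)
///     A[I] = I;
/// \endcode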
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
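/// For example:
/// \code
///   #pragma omp target teams distribute parallel for map(tofrom: A[0:N])
///   for (int I = 0; I < N; ++I)
///     A[I] *= 2;
/// \endcode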
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. 
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. 
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. 
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. 
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for the various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension.
IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ, e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright; it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType);
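// Illustrative sketch, not part of this header's interface: a few C assignments and the
// AssignConvertType value the enum comments above suggest for each. The variable names are
// invented, and the mapping paraphrases the comments rather than any particular Clang
// revision.
//
//   int i; int *ip; unsigned *up; char **cpp; const char **ccpp; void *vp;
//   void (*fp)(void);
//
//   i = ip;     // PointerToInt  - pointer stored into an int, accepted as an extension
//   ip = i;     // IntToPointer  - int stored into a pointer, accepted as an extension
//   vp = fp;    // FunctionVoidPointer - function pointer vs. void*, accepted as an extension
//   up = ip;    // IncompatiblePointerSign - pointees differ only in signedness
//   ccpp = cpp; // IncompatibleNestedPointerQualifiers - the 'char ** -> const char **' case
/// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true.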
AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. 
}; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. 
void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> CUDADeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> CUDAKnownEmittedFns; /// A partial call graph maintained during CUDA compilation to support /// deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to CUDAKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> CUDACallGraph; /// Diagnostic builder for CUDA errors which may or may not be deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class CUDADiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); ~CUDADiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (CUDADiagBuilder(...) 
<< foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a CUDADiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiag.hasValue()) *Diag.PartialDiag << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<PartialDiagnostic> PartialDiag; }; /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. 
/// Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD);
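// Illustrative sketch, not part of this header's interface: how the CUDAFunctionPreference
// values above apply to concrete caller/callee pairs. The function names are invented and
// the classifications paraphrase the enum comments, not a specific Clang revision.
//
//   __device__ int dev();
//   __host__   int host();
//
//   __device__ void d_caller()           { dev();  }  // device -> device: CFP_Native
//   __device__ void bad_caller()         { host(); }  // device -> host:   CFP_Never, so
//                                                     // IsAllowedCUDACall() returns false
//   __host__ __device__ void hd_caller() { dev();  }  // when compiling for the host this is
//                                                     // CFP_WrongSide: allowed, but CheckCUDACall
//                                                     // defers a diagnostic that fires only if
//                                                     // hd_caller is ever codegen'ed
/// Check whether NewFD is a valid overload for CUDA.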
Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType 
ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool 
SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. 
/// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDefaultedMemberExceptionSpecs.empty() && "there shouldn't be any pending delayed defaulted member " "exception specs"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedExceptionSpecChecks) SavedExceptionSpecChecks; decltype(DelayedDefaultedMemberExceptionSpecs) SavedDefaultedMemberExceptionSpecs; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedExceptionSpecChecks.swap(S.DelayedExceptionSpecChecks); SavedDefaultedMemberExceptionSpecs.swap( S.DelayedDefaultedMemberExceptionSpecs); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. 
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
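The DenseMapInfo specialization above follows LLVM's required four-member pattern for custom DenseMap keys: an empty key, a tombstone key, a hash function, and an equality predicate. Below is a minimal sketch, not part of Sema.h, of the same pattern for a hypothetical pair-of-unsigned key; the sentinel values are assumptions and must never occur as real keys.

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"

// Hypothetical key type used only for this sketch.
struct IdAndLine { unsigned Id; unsigned Line; };

namespace llvm {
template <> struct DenseMapInfo<IdAndLine> {
  // Two reserved sentinel keys (assumed never to be real Ids).
  static IdAndLine getEmptyKey() { return {~0u, 0u}; }
  static IdAndLine getTombstoneKey() { return {~0u - 1, 0u}; }
  static unsigned getHashValue(const IdAndLine &K) {
    return hash_combine(K.Id, K.Line);
  }
  static bool isEqual(const IdAndLine &L, const IdAndLine &R) {
    return L.Id == R.Id && L.Line == R.Line;
  }
};
} // namespace llvm

// Usage: llvm::DenseMap<IdAndLine, int> Counts; ++Counts[IdAndLine{7, 120}];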
util.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "base.h" #include "thd_info.h" #include "util.h" /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ val_t rand_val(void) { /* TODO: modify this to work based on the size of idx_t */ val_t v = 3.0 * ((val_t) rand() / (val_t) RAND_MAX); if(rand() % 2 == 0) { v *= -1; } return v; } idx_t rand_idx(void) { /* TODO: modify this to work based on the size of idx_t */ return (idx_t) (rand() << 16) | rand(); } void fill_rand( val_t * const restrict vals, idx_t const nelems) { for(idx_t i=0; i < nelems; ++i) { vals[i] = 1; //rand_val(); } } char * bytes_str( size_t const bytes) { double size = (double)bytes; int suff = 0; const char *suffix[5] = {"B", "KB", "MB", "GB", "TB"}; while(size > 1024 && suff < 5) { size /= 1024.; ++suff; } char * ret = NULL; if(asprintf(&ret, "%0.2f%s", size, suffix[suff]) == -1) { fprintf(stderr, "SPLATT: asprintf failed with %zu bytes.\n", bytes); ret = NULL; } return ret; } idx_t argmax_elem( idx_t const * const arr, idx_t const N) { idx_t mkr = 0; for(idx_t i=1; i < N; ++i) { if(arr[i] > arr[mkr]) { mkr = i; } } return mkr; } idx_t argmin_elem( idx_t const * const arr, idx_t const N) { idx_t mkr = 0; for(idx_t i=1; i < N; ++i) { if(arr[i] < arr[mkr]) { mkr = i; } } return mkr; } int * get_primes( int N, int * nprimes) { int size = 10; int * p = (int *) splatt_malloc(size * sizeof(int)); int np = 0; while(N != 1) { int i; for(i=2; i <= N; ++i) { if(N % i == 0) { /* found the next prime */ break; } } /* realloc if necessary */ if(size == np) { p = (int *) realloc(p, size * 2 * sizeof(int)); } p[np++] = i; N /= i; } *nprimes = np; return p; } void par_memcpy( void * const restrict dst, void const * const restrict src, size_t const bytes) { #pragma omp parallel { int nthreads = splatt_omp_get_num_threads(); int tid = splatt_omp_get_thread_num(); size_t n_per_thread = (bytes + nthreads - 1)/nthreads; size_t n_begin = SS_MIN(n_per_thread * tid, bytes); size_t n_end = SS_MIN(n_begin + n_per_thread, bytes); memcpy((char *)dst + n_begin, (char *)src + n_begin, n_end - n_begin); } }
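get_primes() above factors N by repeated trial division; note that the capacity variable is not updated after the realloc, so the growth check only fires once. A standalone sketch of the same factorization, using only the C standard library instead of splatt_malloc and growing the buffer correctly, looks like this:

/* Standalone sketch (C standard library only, not the SPLATT allocators) of
 * the trial-division factorization used by get_primes(): repeatedly find the
 * smallest divisor >= 2 (which is necessarily prime) and divide it out. */
#include <stdio.h>
#include <stdlib.h>

static int *factorize(int N, int *nprimes)
{
  int cap = 10;
  int *p = malloc(cap * sizeof(int));
  int np = 0;
  while (N != 1) {
    int d = 2;
    while (N % d != 0) ++d;            /* smallest divisor of N is prime */
    if (np == cap) {                   /* grow and remember the new capacity */
      cap *= 2;
      p = realloc(p, cap * sizeof(int));
    }
    p[np++] = d;
    N /= d;
  }
  *nprimes = np;
  return p;
}

int main(void)
{
  int np;
  int *f = factorize(84, &np);         /* expected output: 2 2 3 7 */
  for (int i = 0; i < np; ++i) printf("%d ", f[i]);
  printf("\n");
  free(f);
  return 0;
}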
affinity_values.c
// RUN: %libomp-compile // RUN: env OMP_PROC_BIND=close OMP_PLACES=threads %libomp-run // RUN: env OMP_PROC_BIND=close OMP_PLACES=cores %libomp-run // RUN: env OMP_PROC_BIND=close OMP_PLACES=sockets %libomp-run // RUN: env KMP_AFFINITY=compact %libomp-run // RUN: env KMP_AFFINITY=scatter %libomp-run // REQUIRES: affinity #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #define XSTR(x) #x #define STR(x) XSTR(x) #define streqls(s1, s2) (!strcmp(s1, s2)) #define check(condition) \ if (!(condition)) { \ fprintf(stderr, "error: %s: %d: " STR(condition) "\n", __FILE__, \ __LINE__); \ exit(1); \ } #define DEBUG 0 #if DEBUG #include <stdarg.h> #endif #define BUFFER_SIZE 1024 char buf[BUFFER_SIZE]; #pragma omp threadprivate(buf) static int debug_printf(const char* format, ...) { int retval = 0; #if DEBUG va_list args; va_start(args, format); retval = vprintf(format, args); va_end(args); #endif return retval; } static void display_affinity_environment() { #if DEBUG printf("Affinity Environment:\n"); printf(" OMP_PROC_BIND=%s\n", getenv("OMP_PROC_BIND")); printf(" OMP_PLACES=%s\n", getenv("OMP_PLACES")); printf(" KMP_AFFINITY=%s\n", getenv("KMP_AFFINITY")); #endif } // Reads in a list of integers into ids array (not going past ids_size) // e.g., if affinity = "0-4,6,8-10,14,16,17-20,23" // then ids = [0,1,2,3,4,6,8,9,10,14,16,17,18,19,20,23] void list_to_ids(const char* affinity, int* ids, int ids_size) { int id, b, e, ids_index; char *aff, *begin, *end, *absolute_end; aff = strdup(affinity); absolute_end = aff + strlen(aff); ids_index = 0; begin = end = aff; while (end < absolute_end) { end = begin; while (*end != '\0' && *end != ',') end++; *end = '\0'; if (strchr(begin, '-') != NULL) { // Range sscanf(begin, "%d-%d", &b, &e); } else { // Single Number sscanf(begin, "%d", &b); e = b; } for (id = b; id <= e; ++id) { ids[ids_index++] = id; if (ids_index >= ids_size) { free(aff); return; } } begin = end + 1; } free(aff); } void check_thread_affinity() { int i; const char *formats[2] = {"%{thread_affinity}", "%A"}; for (i = 0; i < sizeof(formats) / sizeof(formats[0]); ++i) { omp_set_affinity_format(formats[i]); #pragma omp parallel { int j, k; int place = omp_get_place_num(); int num_procs = omp_get_place_num_procs(place); int *ids = (int *)malloc(sizeof(int) * num_procs); int *ids2 = (int *)malloc(sizeof(int) * num_procs); char buf[256]; size_t n = omp_capture_affinity(buf, 256, NULL); check(n <= 256); omp_get_place_proc_ids(place, ids); list_to_ids(buf, ids2, num_procs); #pragma omp for schedule(static) ordered for (k = 0; k < omp_get_num_threads(); ++k) { #pragma omp ordered { debug_printf("Thread %d: captured affinity = %s\n", omp_get_thread_num(), buf); for (j = 0; j < num_procs; ++j) { debug_printf("Thread %d: ids[%d] = %d ids2[%d] = %d\n", omp_get_thread_num(), j, ids[j], j, ids2[j]); check(ids[j] == ids2[j]); } } } free(ids); free(ids2); } } } int main(int argc, char** argv) { omp_set_nested(1); display_affinity_environment(); check_thread_affinity(); return 0; }
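list_to_ids() expands OMP_PLACES-style lists such as "0-4,6,8-10" into explicit id arrays so they can be compared against omp_get_place_proc_ids(). A standalone sketch of the same expansion, testable without OpenMP or an affinity-capable machine (strdup is POSIX, as in the original), is:

/* Standalone sketch of the "0-4,6,8-10"-style list expansion done by
 * list_to_ids(), checked against the example given in its comment. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int expand(const char *list, int *ids, int cap)
{
  char *s = strdup(list), *save = NULL;
  int n = 0;
  for (char *tok = strtok_r(s, ",", &save); tok; tok = strtok_r(NULL, ",", &save)) {
    int b, e;
    if (sscanf(tok, "%d-%d", &b, &e) != 2) { sscanf(tok, "%d", &b); e = b; } /* range or single id */
    for (int id = b; id <= e && n < cap; ++id) ids[n++] = id;
  }
  free(s);
  return n;
}

int main(void)
{
  int ids[64];
  int n = expand("0-4,6,8-10,14,16,17-20,23", ids, 64);
  for (int i = 0; i < n; ++i) printf("%d ", ids[i]);  /* 0 1 2 3 4 6 8 9 10 14 16 17 18 19 20 23 */
  printf("\n");
  return 0;
}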
Network.h
/* * Network.h * * Created by Guido Novati on 30.10.18. * Copyright 2018 ETH Zurich. All rights reserved. * */ #pragma once #include "Layers.h" struct Network { std::mt19937 gen; // Vector of layers, each defines a forward and bckward operation: std::vector<Layer*> layers; // Vector of parameters of each layer (two vectors must have the same size) // Each Params contains the matrices of parameters needed by the corresp layer std::vector<Params*> params; // Vector of grads for each parameter. By definition they have the same size std::vector<Params*> grads; // Memory space where each layer can compute its output and gradient: std::vector<Activation*> workspace; // Number of inputs to the network: int nInputs = 0; // Number of network outputs: int nOutputs = 0; Network(const int seed = 0) : gen(seed) {}; std::vector<std::vector<Real>> forward( // one vector of input for each element in the mini-batch: const std::vector<std::vector<Real>> I, // layer ID at which to start forward operation: const size_t layerStart = 0 // (zero means compute from input to output) ) { if(params.size()==0 || grads.size()==0 || layers.size()==0) { printf("Attempted to access uninitialized network. Aborting\n"); abort(); } // input is a minibatch of datapoints: one vector for each datapoint: const size_t batchSize = I.size(); // allocate workspaces where we can write output of each layer clearWorkspace(); workspace = allocateActivation(batchSize); // User can overwrite the output of any upper layer (marked by layerStart) // in order to see what happens if layer layerStart has a predefined output. // ( this allows visualizing PCA components! ) const int inputLayerSize = workspace[layerStart]->layersSize; //copy input onto output of input layer: #pragma omp parallel for for (size_t b=0; b<batchSize; b++) { assert(I[b].size() == (size_t) inputLayerSize ); // Input to the network is the output of input layer. // Respective workspace is a matrix of size [batchSize]x[nInputs] // Here we use row-major ordering: nInputs is the number of columns. Real* const input_b = workspace[layerStart]->output + b * inputLayerSize; // copy from function argument to workspace: std::copy(I[b].begin(), I[b].end(), input_b); } // Start from layer after input. E.g. Input layer is 0. No need to backprop // input layer has it has no parameters. for (size_t j=layerStart+1; j<layers.size(); j++) layers[j]->forward(workspace, params); // copy output into vector of vectors: one vector for each element of batch std::vector<std::vector<Real>> O(batchSize, std::vector<Real>(nOutputs, 0)); #pragma omp parallel for for (size_t b=0; b<batchSize; b++) { assert(nOutputs == workspace.back()->layersSize); // network output is the output of last layer. // Respective workspace is a matrix of size [batchSize]x[nOutputs] // Here we use row-major ordering: nOutputs is the number of columns. Real* const output_b = workspace.back()->output + b * nOutputs; // copy from function argument to workspace: std::copy(output_b, output_b + nOutputs, O[b].begin()); } return O; } void bckward( // vector of size of mini-batch of gradients of error wrt to network output const std::vector<std::vector<Real>> E, // layer ID at which forward operation was started: const size_t layerStart=0 // (zero means compute from input to output) ) const { //this function assumes that we already called forward propagation if(params.size()==0 || grads.size()==0 || layers.size()==0) { printf("Attempted to access uninitialized network. 
Aborting\n"); abort(); } // input is a minibatch of datapoints: one vector for each datapoint: const size_t batchSize = E.size(); assert( (size_t) workspace.back()->batchSize == batchSize); // first, clear memory over which we'll write gradients: for(auto& p : workspace) p->clearErrors(); //copy input onto output of input layer: #pragma omp parallel for for (size_t b=0; b<batchSize; b++) { assert(E[b].size() == (size_t) nOutputs); // Write d Err / d Out onto last layer of the network. // Respective workspace is a matrix of size [batchSize]x[nOutputs] // Here we use row-major ordering: nOutputs is the number of columns. Real* const errors_b = workspace.back()->dError_dOutput + b * nOutputs; // copy from function argument to workspace: std::copy(E[b].begin(), E[b].end(), errors_b); } // Backprop starts at the last layer, which computes gradient of error wrt // to its parameters and gradient of error wrt to it's input. // Last layer to backprop is the one above input layer. Eg. if layerStart=0 // Then input layer was 0, which has no parametes and has no inputs to // backprp the error grad to, last layer to backprop is layer 1. for (size_t i = layers.size()-1; i >= layerStart + 1; i--) layers[i]->bckward(workspace, params, grads); } // Helper function for forward with batchsize = 1 std::vector<Real> forward(const std::vector<Real>I, const size_t layerStart=0) { std::vector<std::vector<Real>> vecI (1, I); std::vector<std::vector<Real>> vecO = forward(vecI, layerStart); return vecO[0]; } // Helper function for forward with batchsize = 1) void bckward(const std::vector<Real> E, const size_t layerStart = 0) const { std::vector<std::vector<Real>> vecE (1, E); bckward(vecE, layerStart); } void save() const { for(const auto &l : layers) l->save(params); } void restart() const { for(const auto &l : layers) l->restart(params); } ~Network() { for(auto& p : grads) _dispose_object(p); for(auto& p : params) _dispose_object(p); for(auto& p : layers) _dispose_object(p); for(auto& p : workspace) _dispose_object(p); } inline void clearWorkspace() { for(auto& p : workspace) _dispose_object(p); workspace.clear(); } // Function to loop over layers and allocate workspace for network operations: inline std::vector<Activation*> allocateActivation(size_t batchSize) const { std::vector<Activation*> ret(layers.size(), nullptr); for(size_t j=0; j<layers.size(); j++) ret[j] = layers[j]->allocateActivation(batchSize); return ret; } // Function to loop over layers and allocate memory space for parameter grads: inline std::vector<Params*> allocateGrad() const { std::vector<Params*> ret(layers.size(), nullptr); for(size_t j=0; j<layers.size(); j++) ret[j] = layers[j]->allocate_params(); return ret; } ////////////////////////////////////////////////////////////////////////////// /// Functions to build the network are defined in Network_buildFunctions.h /// ////////////////////////////////////////////////////////////////////////////// template<int size> void addInput(); template<int nInputs, int size> void addLinear(const std::string fname = std::string()); template<int size> void addTanh(); }; #include "Network_buildFunctions.h"
GB_unop__identity_int64_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int64_int32) // op(A') function: GB (_unop_tran__identity_int64_int32) // C type: int64_t // A type: int32_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int64_t z = (int64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int64_t z = (int64_t) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int64_int32) ( int64_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int64_t z = (int64_t) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int64_t z = (int64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int64_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
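After macro expansion, the non-bitmap branch of GB (_unop_apply__identity_int64_int32) above is just a typecasting copy loop. A standalone sketch of that expanded loop (compilable with or without OpenMP) is:

/* Sketch of what the generated apply kernel reduces to once GB_GETA, GB_CAST,
 * and GB_OP are expanded for the identity op with int32 -> int64 typecast. */
#include <stdint.h>
#include <stdio.h>

static void unop_apply_identity_int64_int32(int64_t *Cx, const int32_t *Ax,
                                            int64_t anz, int nthreads)
{
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++)
  {
    int32_t aij = Ax[p];         /* GB_GETA            */
    int64_t z = (int64_t) aij;   /* GB_CAST            */
    Cx[p] = z;                   /* GB_OP (identity)   */
  }
}

int main(void)
{
  int32_t A[4] = {-3, 0, 7, 42};
  int64_t C[4];
  unop_apply_identity_int64_int32(C, A, 4, 2);
  for (int i = 0; i < 4; i++) printf("%lld ", (long long) C[i]);
  printf("\n");
  return 0;
}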
Test_IO_Compressed.h
/* * Test_IO_Compressed.h * CubismZ * * Copyright 2018 ETH Zurich. All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include "Types_single.h" #include "SerializerIO_WaveletCompression_MPI_Simple.h" #include "Reader_WaveletCompression.h" #include <ArgumentParser.h> #include <GridMPI.h> #define VERBOSE 0 #ifdef _USE_HDF_ #include <hdf5.h> #endif #ifdef _FLOAT_PRECISION_ #define HDF_REAL H5T_NATIVE_FLOAT #else #define HDF_REAL H5T_NATIVE_DOUBLE #endif //#define _PARALLEL_IO_ //#define _COLLECTIVE_IO_ typedef GridMPI < FluidGrid > G; class Test_IO_Compressed : public Simulation { protected: int BPDX, BPDY, BPDZ; int NPROCX, NPROCY, NPROCZ; int channel; int step_id; bool VERBOSITY; G * grid; ArgumentParser parser; string inputfile_name, inputfile_dataset, outputfile_name; int myrank, size; SerializerIO_WaveletCompression_MPI_SimpleBlocking<G, StreamerGridPointIterative> mywaveletdumper; void _ic(G& grid) { vector<BlockInfo> vInfo = grid.getResidentBlocksInfo(); #if VERBOSE Real min_u = 1e8, max_u = -1e8; #endif int myrank, mypeindex[3], pesize[3]; int periodic[3]; MPI_Comm cartcomm; periodic[0] = 1; periodic[1] = 1; periodic[2] = 1; pesize[0] = NPROCX; pesize[1] = NPROCY; pesize[2] = NPROCZ; MPI_Comm_size(MPI_COMM_WORLD, &size); assert(NPROCX*NPROCY*NPROCZ == size); MPI_Cart_create(MPI_COMM_WORLD, 3, pesize, periodic, true, &cartcomm); MPI_Comm_rank(cartcomm, &myrank); MPI_Cart_coords(cartcomm, myrank, 3, mypeindex); // open HDF5 // read data block // store in grid blocks int rank; char filename[256]; herr_t status; hid_t file_id, dataset_id, fspace_id, fapl_id, mspace_id; MPI_Comm_rank(MPI_COMM_WORLD, &rank); int coords[3]; coords[0] = mypeindex[0]; coords[1] = mypeindex[1]; coords[2] = mypeindex[2]; const int NX = BPDX*_BLOCKSIZE_; const int NY = BPDY*_BLOCKSIZE_; const int NZ = BPDZ*_BLOCKSIZE_; #if VERBOSE printf("coords = %d,%d,%d\n", coords[0], coords[1], coords[2]); printf("NX,NY,NZ = %d,%d,%d\n", NX, NY, NZ); #endif static const int NCHANNELS = TOTAL_CHANNELS; Real * array_all = new Real[NX * NY * NZ * NCHANNELS]; vector<BlockInfo> vInfo_local = grid.getResidentBlocksInfo(); static const int sX = 0; static const int sY = 0; static const int sZ = 0; const int eX = _BLOCKSIZE_; const int eY = _BLOCKSIZE_; const int eZ = _BLOCKSIZE_; hsize_t count[4] = { (hsize_t)NX, (hsize_t)NY, (hsize_t)NZ, (hsize_t)NCHANNELS}; // global domain size as specified by the process grid, the block layout and the block size hsize_t dims[4] = { (hsize_t)grid.getBlocksPerDimension(0)*_BLOCKSIZE_, (hsize_t)grid.getBlocksPerDimension(1)*_BLOCKSIZE_, (hsize_t)grid.getBlocksPerDimension(2)*_BLOCKSIZE_, (hsize_t)NCHANNELS}; hsize_t offset[4] = { (hsize_t)coords[0]*NX, (hsize_t)coords[1]*NY, (hsize_t)coords[2]*NZ, (hsize_t)0}; #if VERBOSE printf("count = [%d, %d, %d, %d]\n", count[0], count[1], count[2], count[3]); printf("dims = [%d, %d, %d, %d]\n", dims[0], dims[1], dims[2], dims[3]); printf("offset = [%d, %d, %d, %d]\n", offset[0], offset[1], offset[2], offset[3]); #endif sprintf(filename, "%s", inputfile_name.c_str()); H5open(); fapl_id = H5Pcreate(H5P_FILE_ACCESS); #if defined(_PARALLEL_IO_) status = H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL); #endif file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id); status = H5Pclose(fapl_id); dataset_id = H5Dopen2(file_id, inputfile_dataset.c_str(), H5P_DEFAULT); fapl_id = H5Pcreate(H5P_DATASET_XFER); #if defined(_COLLECTIVE_IO_) 
H5Pset_dxpl_mpio(fapl_id, H5FD_MPIO_COLLECTIVE); #endif fspace_id = H5Dget_space(dataset_id); if (myrank == 0) { // Before reading any data, we perform a check that we have specified the correct and exact domain size //https://stackoverflow.com/questions/15786626/get-the-dimensions-of-a-hdf5-dataset //If your data space is simple (i.e. not null or scalar), then you can get the number of dimensions using H5Sget_simple_extent_ndims: const int ndims = H5Sget_simple_extent_ndims(fspace_id); //and the size of each dimension using H5Sget_simple_extent_dims: hsize_t hdims[ndims]; H5Sget_simple_extent_dims(fspace_id, hdims, NULL); //The dimensions are now stored in hdims. for (int i = 0; i < ndims; i++) printf("hdims[%d] = %llu\n", i, hdims[i]); for (int i = 0; i < ndims; i++) if (hdims[i] != dims[i]) { printf("The dataset size does not match the one specified by the user. Aborting... \n"); MPI_Abort(MPI_COMM_WORLD, 1); } } MPI_Barrier(MPI_COMM_WORLD); H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, offset, NULL, count, NULL); mspace_id = H5Screate_simple(4, count, NULL); status = H5Dread(dataset_id, HDF_REAL, mspace_id, fspace_id, fapl_id, array_all); MPI_Barrier(MPI_COMM_WORLD); // fill in the blocks #ifdef _OPENMP #pragma omp parallel for #endif for(int i=0; i<(int)vInfo.size(); i++) { BlockInfo info = vInfo[i]; const int idx[3] = {info.index[0], info.index[1], info.index[2]}; FluidBlock& b = *(FluidBlock*)info.ptrBlock; #if VERBOSE printf("idx=[%d,%d,%d]\n", idx[0], idx[1], idx[2]); #endif for(int ix=sX; ix<eX; ix++) for(int iy=sY; iy<eY; iy++) for(int iz=sZ; iz<eZ; iz++) { const int gx = idx[0]*_BLOCKSIZE_ + ix; const int gy = idx[1]*_BLOCKSIZE_ + iy; const int gz = idx[2]*_BLOCKSIZE_ + iz; Real * const ptr_input = array_all + NCHANNELS*(gz + NZ * (gy + NY * gx)); Real val = ptr_input[channel]; b(ix, iy, iz).u = val; #if VERBOSE if (val < min_u) min_u = val; if (val > max_u) max_u = val; #endif } } #if VERBOSE printf("min_u = %lf max_u = %lf\n", min_u, max_u); #endif status = H5Pclose(fapl_id); status = H5Dclose(dataset_id); status = H5Sclose(fspace_id); status = H5Sclose(mspace_id); status = H5Fclose(file_id); H5close(); MPI_Comm_free(&cartcomm); delete [] array_all; } void _setup_mpi_constants(int& nprocx, int& nprocy, int& nprocz) { nprocx = parser("-nprocx").asInt(1); nprocy = parser("-nprocy").asInt(1); nprocz = parser("-nprocz").asInt(1); } public: const bool isroot; Test_IO_Compressed(const bool isroot, const int argc, const char ** argv): grid(NULL), parser(argc,argv), isroot(isroot) { } void setup() { parser.unset_strict_mode(); BPDX = parser("-bpdx").asInt(1); BPDY = parser("-bpdy").asInt(1); BPDZ = parser("-bpdz").asInt(1); step_id = parser("-stepid").asInt(0); channel = parser("-channel").asInt(0); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); inputfile_name = parser("-h5file").asString("none"); inputfile_dataset = parser("-dataset").asString("/data"); outputfile_name = parser("-czfile").asString("none"); if (parser.exist("-help") || ((inputfile_name == "none")||(outputfile_name == "none"))) { printf("Usage: %s -h5file <hdf5 file> -czfile <cz file> -threshold <e> [-wtype <wt>] [-bpdx <nbx>] [-bpdy <nby>] [-bpdz <nbz>] [-nprocx <npx>] [-nprocy <npy>] [-nprocz <npz>]\n", "hdf2cz"); exit(1); } _setup_mpi_constants(NPROCX, NPROCY, NPROCZ); VERBOSITY = 0; if (isroot) VERBOSITY = 1; if (VERBOSITY) { printf("////////////////////////////////////////////////////////////\n"); printf("//////////// TEST IO Compressed MPI ///////////////\n"); 
printf("////////////////////////////////////////////////////////////\n"); } parser.loud(); if (!isroot) parser.mute(); grid = new G(NPROCX, NPROCY, NPROCZ, BPDX, BPDY, BPDZ); assert(grid != NULL); _ic(*grid); vp(*grid, step_id); } void vp(G& grid, const int step_id) { if (isroot) std::cout << "Creating MPI CZ dump...\n" ; const string path = parser("-fpath").asString("."); const int wtype = parser("-wtype").asInt(3); //3rd order average interpolating wavelets std::stringstream streamer; streamer<<path; streamer<<"/"; streamer<<outputfile_name; #if defined(_USE_SZ_) SZ_Init((char *)"sz.config"); #ifdef _OPENMP omp_set_num_threads(1); #endif #endif double threshold = parser("-threshold").asDouble(0); mywaveletdumper.verbose(); if (isroot) printf("setting threshold to %f\n", threshold); mywaveletdumper.set_threshold(threshold); mywaveletdumper.set_wtype_write(wtype); MPI_Barrier(MPI_COMM_WORLD); double t0 = MPI_Wtime(); mywaveletdumper.Write<0>(grid, streamer.str()); double t1 = MPI_Wtime(); if (isroot) std::cout << "done" << endl; int nthreads; #ifdef _OPENMP #pragma omp parallel #pragma omp master nthreads = omp_get_num_threads(); #else nthreads = 1; #endif if (isroot) std::cout << "Threads: " << nthreads << " Elapsed time: " << t1-t0 << " seconds" << endl; } void dispose() { #if defined(_USE_SZ_) SZ_Finalize(); #endif if (grid!=NULL) { delete grid; grid = NULL; } } };
GB_unaryop__minv_int32_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int32_uint64 // op(A') function: GB_tran__minv_int32_uint64 // C type: int32_t // A type: uint64_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 32) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 32) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int32_uint64 ( int32_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int32_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__rminus_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint32) // A*D function (colscale): GB (_AxD__rminus_uint32) // D*A function (rowscale): GB (_DxB__rminus_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint32) // C=scalar+B GB (_bind1st__rminus_uint32) // C=scalar+B' GB (_bind1st_tran__rminus_uint32) // C=A+scalar GB (_bind2nd__rminus_uint32) // C=A'+scalar GB (_bind2nd_tran__rminus_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT32 || GxB_NO_RMINUS_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rminus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" 
GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rminus_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rminus_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { 
// GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
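For the non-commutative rminus operator (z = y - x), the bind1st and bind2nd kernels above differ only in which argument the scalar occupies: bind1st computes Cx[p] = Bx[p] - x, while bind2nd computes Cx[p] = y - Ax[p]. A standalone sketch illustrating the difference (unsigned arithmetic wraps modulo 2^32):

/* Sketch of the bind1st vs. bind2nd distinction for rminus on uint32 values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  uint32_t v[3] = {10, 20, 30}, c1[3], c2[3], s = 7;
  for (int p = 0; p < 3; p++) c1[p] = v[p] - s;   /* bind1st: scalar bound as x */
  for (int p = 0; p < 3; p++) c2[p] = s - v[p];   /* bind2nd: scalar bound as y (wraps if negative) */
  printf("%u %u %u | %u %u %u\n", c1[0], c1[1], c1[2], c2[0], c2[1], c2[2]);
  return 0;
}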
neuron.h
/*
 * Computer Systems Architectures (AVS 2019)
 * Project no. 1 (ANN)
 * Login: xstupi00
 */

/**
 * @brief Returns the output of the neuron: the sum of the products of inputs and weights, plus the bias
 * @param inputSize - number of inputs to the neuron
 * @param input - pointer to the neuron input array (identical for all neurons in the layer)
 * @param weight - pointer to the weights for all neurons in the layer
 * @param bias - bias value of the neuron
 * @return Output of the neuron
 */
// #pragma omp declare simd uniform(inputSize, input) linear(weight:512) simdlen(8) notinbranch
// #pragma omp declare simd uniform(inputSize, input) linear(weight:784) simdlen(8) notinbranch
#pragma omp declare simd uniform(inputSize, input) linear(weight) simdlen(8) notinbranch
float evalNeuron(
    size_t inputSize,
    const float* input,
    const float* weight,
    float bias
);
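The header only declares evalNeuron(); a hedged sketch of one possible implementation consistent with the declare simd variant above is a dot product with a SIMD reduction plus the bias. Whether an activation (e.g. ReLU) belongs inside this function depends on the rest of the project, which is not shown here.

/* Sketch only, not the project's graded implementation. The declare simd
 * pragma from the header is repeated so vector variants are also generated
 * at the definition site. */
#include <stddef.h>

#pragma omp declare simd uniform(inputSize, input) linear(weight) simdlen(8) notinbranch
float evalNeuron(
    size_t inputSize,
    const float* input,
    const float* weight,
    float bias
)
{
  float sum = 0.0f;
  #pragma omp simd reduction(+:sum)
  for (size_t i = 0; i < inputSize; ++i)
    sum += input[i] * weight[i];     /* weighted sum of the inputs */
  return sum + bias;                 /* any activation is assumed to be applied by the caller */
}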
GB_binop__eq_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__eq_fp32 // A.*B function (eWiseMult): GB_AemultB__eq_fp32 // A*D function (colscale): GB_AxD__eq_fp32 // D*A function (rowscale): GB_DxB__eq_fp32 // C+=B function (dense accum): GB_Cdense_accumB__eq_fp32 // C+=b function (dense accum): GB_Cdense_accumb__eq_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_fp32 // C=scalar+B GB_bind1st__eq_fp32 // C=scalar+B' GB_bind1st_tran__eq_fp32 // C=A+scalar GB_bind2nd__eq_fp32 // C=A'+scalar GB_bind2nd_tran__eq_fp32 // C type: bool // A type: float // B,b type: float // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__eq_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__eq_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__eq_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__eq_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__eq_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__eq_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__eq_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__eq_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__eq_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__eq_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__eq_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
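The generated kernels above are internal to SuiteSparse:GraphBLAS and are normally reached through the user-level C API rather than called directly. The following minimal driver is a hypothetical sketch (not part of the generated file): it assumes SuiteSparse:GraphBLAS is installed with GraphBLAS.h on the include path, and uses only non-polymorphic API names; a build that contains this Generated/ kernel can dispatch the eWiseAdd call below to GB_AaddB__eq_fp32.

#include <GraphBLAS.h>
#include <stdio.h>

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;

    GrB_Matrix A, B, C ;
    GrB_Matrix_new (&A, GrB_FP32, 2, 2) ;
    GrB_Matrix_new (&B, GrB_FP32, 2, 2) ;
    GrB_Matrix_new (&C, GrB_BOOL, 2, 2) ;

    GrB_Matrix_setElement_FP32 (A, 1.5f, 0, 0) ;
    GrB_Matrix_setElement_FP32 (B, 1.5f, 0, 0) ;
    GrB_Matrix_setElement_FP32 (A, 2.0f, 1, 1) ;
    GrB_Matrix_setElement_FP32 (B, 3.0f, 1, 1) ;

    // eWiseAdd with the EQ_FP32 operator: cij = (aij == bij) on the union pattern,
    // exactly the GB_BINOP defined above (bool C, float A and B).
    GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_EQ_FP32, A, B, NULL) ;

    bool c00 = false, c11 = true ;
    GrB_Matrix_extractElement_BOOL (&c00, C, 0, 0) ;
    GrB_Matrix_extractElement_BOOL (&c11, C, 1, 1) ;
    printf ("C(0,0)=%d C(1,1)=%d\n", (int) c00, (int) c11) ;   // expect 1 and 0

    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&B) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return 0 ;
}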
matrix.h
#ifndef TMS_SUBMOD_MATRIX_H #define TMS_SUBMOD_MATRIX_H #include <random> #include <list> #include "mkl.h" #include <assert.h> #include <iomanip> #include <iostream> #include <algorithm> #include <utility> #include <vector> #include <string> #include <cstdint> #include <cstdlib> #include <cmath> #include "../perf/perf.h" #include "../perf_log.h" #include "../util.h" template<class DT> class Vector; template<class DT> class Matrix { //protected: public: DT * _values; int64_t _m; int64_t _n; int64_t _rs; int64_t _cs; int64_t _base_m; int64_t _base_n; bool _mem_manage; //public: // // Constructors // Matrix(int64_t m, int64_t n) : _m(m), _n(n), _rs(1), _cs(m), _base_m(m), _base_n(n), _mem_manage(true) { const int ret = posix_memalign((void **) &_values, 4096, _m * _n * sizeof(DT)); if (ret != 0) { std::cout << "Could not allocate memory for Matrix. Exiting ..." << std::endl; exit(1); } } Matrix(DT* values, int64_t m, int64_t n, int64_t rs, int64_t cs, int64_t base_m, int64_t base_n, bool mem_manage) : _values(values), _m(m), _n(n), _rs(rs), _cs(cs), _base_m(base_m), _base_n(base_n), _mem_manage(mem_manage) { } Matrix(DT* values, int64_t m, int64_t n, int64_t rs, int64_t cs) : _values(values), _m(m), _n(n), _rs(rs), _cs(cs), _base_m(m), _base_n(n), _mem_manage(false) { } ~Matrix() { if(_mem_manage){ free(_values); } } Matrix& operator=(Matrix&& A) { _m = A._m; _n = A._n; _rs = A._rs; _cs = A._cs; _base_m = A._m; _base_n = A._n; _mem_manage = A._mem_manage; _values = A._values; A._values = nullptr; return *this; }; Matrix(Matrix&& A) : _values(A._values), _m(A._m), _n(A._n), _rs(A._rs), _cs(A._cs), _base_m(A._base_m), _base_n(A._base_n), _mem_manage(A._mem_manage) { A._values = nullptr; } Matrix& operator=(const Matrix& A) { _m = A._m; _n = A._n; _base_m = _m; _base_n = _n; if(A._cs == 1) { _cs = 1; _rs = _n; } else { _rs = 1; _cs = _m; } _mem_manage = true; const int ret = posix_memalign((void **) &_values, 4096, _m * _n * sizeof(DT)); if (ret != 0) { std::cout << "Could not allocate memory for Matrix. Exiting ..." << std::endl; exit(1); } this->copy(A); return *this; } Matrix(const Matrix& A) : _m(A._m), _n(A._n), _base_m(_m), _base_n(_n), _mem_manage(true) { if(A._cs == 1) { _cs = 1; _rs = _n; } else { _rs = 1; _cs = _m; } _mem_manage = true; const int ret = posix_memalign((void **) &_values, 4096, _m * _n * sizeof(DT)); if (ret != 0) { std::cout << "Could not allocate memory for Matrix. Exiting ..." << std::endl; exit(1); } this->copy(A); } void realloc(int64_t m, int64_t n) { //Can only reallocate "base object" assert(_mem_manage == true); assert(m >= _m && n >= _n); assert(_base_m >= _m && _base_n >= _n); //Allocate new array DT* array; const int ret = posix_memalign((void **) &array, 4096, m * n * sizeof(DT)); if (ret != 0) { std::cout << "Could not allocate memory for Matrix. Exiting ..."
<< std::endl; exit(1); } //Copy old array over Matrix<DT> tmp(array, m, n, 1, m, m, n, false); auto tmp_partition = tmp.submatrix(0,0,_m,_n); tmp_partition.copy(*this); //Free old array free(_values); //Setup fields _values = array; _m = m; _n = n; _rs = 1; _cs = m; _base_m = m; _base_n = n; } int64_t height() const { return _m; } int64_t width() const { return _n; } DT& operator() (int64_t row, int64_t col) { assert(row < _m && col < _n && "Matrix index out of bounds"); return _values[row * _rs + col * _cs]; } DT operator() (int64_t row, int64_t col) const { assert(row < _m && col < _n && "Matrix index out of bounds"); return _values[row * _rs + col * _cs]; } DT* lea (int64_t row, int64_t col) { assert(row < _m && col < _n && "Matrix index out of bounds"); return &_values[row * _rs + col * _cs]; } const DT* lea (int64_t row, int64_t col) const { assert(row < _m && col < _n && "Matrix index out of bounds"); return &_values[row * _rs + col * _cs]; } // // Acquiring submatrices, subvectors // Matrix<DT> submatrix(int64_t row, int64_t col, int64_t mc, int64_t nc) { assert(row < _m && col < _n && "Matrix index out of bounds."); auto height = std::min(mc, _m - row); auto width = std::min(nc, _n - col); return Matrix<DT>(lea(row,col), height, width, _rs, _cs, _m, _n, false); } Vector<DT> subrow(int64_t row, int64_t col, int64_t nc) { assert(row < _m && col < _n && "Matrix index out of bounds."); auto width = std::min(nc, _n - col); return Vector<DT>(&_values[row*_rs + col*_cs], width, _n, _cs, false); } Vector<DT> subrow(int64_t row) { assert(row < _m && "Matrix index out of bounds."); return Vector<DT>(&_values[row*_rs], _n, _n, _cs, false); } Vector<DT> subcol(int64_t row, int64_t col, int64_t mc) { assert(row < _m && col < _n && "Matrix index out of bounds."); auto height = std::min(mc, _m - row); return Vector<DT>(&_values[row*_rs + col*_cs], height, _m, _rs, false); } Vector<DT> subcol(int64_t col) { assert(col < _n && "Matrix index out of bounds."); return Vector<DT>(&_values[col*_cs], _m, _m, _rs, false); } const Matrix<DT> submatrix(int64_t row, int64_t col, int64_t mc, int64_t nc) const { assert(row < _m && col < _n && "Matrix index out of bounds."); auto height = std::min(mc, _m - row); auto width = std::min(nc, _n - col); return Matrix<DT>(&_values[row*_rs + col*_cs], height, width, _rs, _cs, _m, _n, false); } const Vector<DT> subrow(int64_t row, int64_t col, int64_t nc) const { assert(row < _m && col < _n && "Matrix index out of bounds."); auto width = std::min(nc, _n - col); return Vector<DT>(&_values[row*_rs + col*_cs], width, _n, _cs, false); } const Vector<DT> subrow(int64_t row) const { assert(row < _m && "Matrix index out of bounds."); return Vector<DT>(&_values[row*_rs], _n, _n, _cs, false); } const Vector<DT> subcol(int64_t row, int64_t col, int64_t mc) const { assert(row < _m && col < _n && "Matrix index out of bounds."); auto height = std::min(mc, _m - row); return Vector<DT>(&_values[row*_rs + col*_cs], height, _m, _rs, false); } const Vector<DT> subcol(int64_t col) const { assert(col < _n && "Matrix index out of bounds."); return Vector<DT>(&_values[col*_cs], _m, _m, _rs, false); } // // Routines for setting elements of the matrix // void set_all(DT alpha) { #pragma omp parallel for for(int64_t j = 0; j < _n; j++) { for(int64_t i = 0; i < _m; i++) { (*this)(i,j) = alpha; } } } template<class RNG, class DIST> void fill_rand(RNG &gen, DIST &dist) { for(int64_t j = 0; j < _n; j++) { for(int64_t i = 0; i < _m; i++) { (*this)(i,j) = dist(gen); } } } void fill_rand() { 
std::random_device rd; std::mt19937 gen{rd()}; std::normal_distribution<> normal(0.0, 1.0); this->fill_rand(gen, normal); } void set_diagonal(DT alpha) { #pragma omp parallel for for(int64_t i = 0; i < std::min(_m, _n); i++) { (*this)(i,i) = alpha; } } void set_subdiagonal(DT alpha) { #pragma omp parallel for for(int64_t i = 0; i < _m; i++) { for(int64_t j = 0; j < i && j < _n; j++) { (*this)(i,j) = alpha; } } } void copy(const Matrix<DT>& other) { assert(_m == other._m && _n == other._n); #pragma omp parallel for for(int j = 0; j < _n; j++) { for(int i = 0; i < _m; i++) { (*this)(i,j) = other(i,j); } } } void copy_permute_rc(const Matrix<DT>& other, const std::vector<int64_t>& p) { assert(_m == other._m && _n == other._n); #pragma omp parallel for for(int j = 0; j < _n; j++) { for(int i = 0; i < _m; i++) { (*this)(i,j) = other(p[i],p[j]); } } } void copy_upper_tri(const Matrix<DT>& other) { assert(_m == other._m && _n == other._n); assert(_m == _n); #pragma omp parallel for for(int j = 0; j < _n; j++) { for(int i = 0; i <= j; i++) { (*this)(i,j) = other(i,j); } } } // // Simple computational routines // void axpby(DT alpha, const Matrix<DT>& other, DT beta) { assert(_m == other._m && _n == other._n); for(int i = 0; i < _m; i++) { for(int j = 0; j < _n; j++) { (*this)(i,j) = beta * (*this)(i,j) + alpha * other(i,j); } } } DT fro_norm() const { DT fro_nrm = 0.0; for(int i = 0; i < _m; i++) { for(int j = 0; j < _n; j++) { fro_nrm += (*this)(i,j)*(*this)(i,j); } } return sqrt(fro_nrm); } void transpose() { std::swap(_m, _n); std::swap(_rs, _cs); std::swap(_base_m, _base_n); } Matrix<DT> transposed() { return Matrix<DT>(_values, _n, _m, _cs, _rs, _base_n, _base_m, false); } const Matrix<DT> transposed() const { return Matrix<DT>(_values, _n, _m, _cs, _rs, _base_n, _base_m, false); } void print(std::string name) const { std::cout << name << std::endl; for(int i = 0; i < _m; i++) { for(int j = 0; j < _n; j++) { std::cout << std::left << std::setprecision(5) << std::setw(12) << (*this)(i,j); } std::cout << std::endl; } std::cout << std::endl; } void print() const { for(int i = 0; i < _m; i++) { for(int j = 0; j < _n; j++) { std::cout << std::left << std::setprecision(5) << std::setw(12) << (*this)(i,j); } std::cout << std::endl; } std::cout << std::endl; } void enlarge_m(int64_t m_inc) { assert(_m + m_inc <= _base_m && "Cannot add row to matrix."); _m += m_inc; } void enlarge_n(int64_t n_inc) { assert(_n + n_inc <= _base_n && "Cannot add colum to matrix."); _n += n_inc; } // // BLAS, LAPACK routines // void mvm(DT, const Vector<DT>&, DT, Vector<DT>&) const { std::cout << "Gemv not implemented for datatype" << std::endl; exit(1); } void trsv(CBLAS_UPLO, Vector<DT>&) const { std::cout << "Trsv not implemented for datatype" << std::endl; exit(1); } void trsm(CBLAS_UPLO, CBLAS_SIDE, Matrix<DT>&) const { std::cout << "Trsv not implemented for datatype" << std::endl; exit(1); } void mmm(DT, const Matrix<DT>&, const Matrix<DT>&, DT) { std::cout << "Gemm not implemented for datatype" << std::endl; exit(1); } void syrk(CBLAS_UPLO, DT, const Matrix<DT>&, DT) { std::cout << "Syrk not implemented for datatype" << std::endl; exit(1); } void qr(Vector<DT>&) { std::cout << "QR factorization not implemented for datatype" << std::endl; exit(1); } void chol(char) { std::cout << "Cholesky factorization not implemented for datatype" << std::endl; exit(1); } void apply_q(Vector<DT>&, Matrix<DT>&) const { std::cout << "Applying Q from qr factorization not implemented for datatype" << std::endl; exit(1); } void 
tpqr(Matrix<DT>&, Matrix<DT>&, int64_t, int64_t) { std::cout << "TPQR factorization not implemented for datatype" << std::endl; exit(1); } void apply_tpq(Matrix<DT>&, Matrix<DT>&, const Matrix<DT>&, int64_t, int64_t, Matrix<DT>&) const { std::cout << "Applying TPQ not implemented for datatype" << std::endl; exit(1); } // // Routines related to removing columns of a matrix // void trap_qr(Vector<DT>& t, int64_t l) { assert(_n + l >= _m && "Nonconformal trap_qr"); assert( t._len >= std::min(_m, _n) && "Cannot perform trap qr."); for(int64_t i = 0; i < _n; i++) { t(i) = house_gen(l, &_values[i*_rs + i*_cs], _rs); if(i+1 < _n){ house_apply(l, _n-i-1, &_values[i*_rs + i*_cs], _rs, t(i), &_values[i*_rs + (i+1)*_cs], _rs, _cs); } } } void apply_trap_q(Matrix<DT>& A, const Vector<DT>& t, int64_t l) const { assert(_n + l >= _m && "Nonconformal apply_trap_q"); assert(t._len >= std::min(_m, _n) && "Cannot apply q from trap qr."); assert(_cs == 1 || _rs == 1 && "Only row or column major trap_qr supported"); for(int64_t i = 0; i < _n; i++) { house_apply(l, A.width(), &_values[i*_rs + i*_cs], _rs, t(i), &A._values[i*A._rs], A._rs, A._cs); } } void remove_col(int64_t col) { std::list<int64_t> tmp; tmp.push_back(col); remove_cols(tmp); } void remove_cols(const std::list<int64_t>& cols_to_remove) { int64_t start, end; start = rdtsc(); int64_t n_removed = 1; for(auto iter = cols_to_remove.begin(); iter != cols_to_remove.end(); iter++) { int64_t block_begin = *iter - (n_removed - 1); int64_t block_end = _n - n_removed; if(std::next(iter,1) != cols_to_remove.end()) { block_end = *std::next(iter,1) - n_removed; } for(int64_t i = block_begin; i < block_end; i++) { auto col_to_overwrite = this->subcol(i); const auto col_to_copy = this->subcol(i + n_removed); col_to_overwrite.copy(col_to_copy); } n_removed++; } this->enlarge_n(-cols_to_remove.size()); end = rdtsc(); PerfLog::get().log_total("REMOVE COLS BYTES", 2 * _m * _n); PerfLog::get().log_total("REMOVE COLS TIME", end - start); } void remove_cols_trap(const std::list<int64_t>& cols_to_remove) { int64_t n_removed = 1; for(auto iter = cols_to_remove.begin(); iter != cols_to_remove.end(); iter++) { int64_t block_begin = *iter - (n_removed - 1); int64_t block_end = _n - n_removed; if(std::next(iter,1) != cols_to_remove.end()) { block_end = *std::next(iter,1) - n_removed; } for(int64_t i = block_begin; i < block_end; i++) { auto col_to_overwrite = this->subcol(0, i, std::min(i + n_removed + 1, _m)); const auto col_to_copy = this->subcol(0, i + n_removed, std::min(i + n_removed + 1, _m)); col_to_overwrite.copy(col_to_copy); } n_removed++; } this->enlarge_n(-cols_to_remove.size()); } void remove_cols_trap_oop(Matrix<DT>& dest, const std::list<int64_t>& cols_to_remove) const { int64_t n_removed = 1; for(auto iter = cols_to_remove.begin(); iter != cols_to_remove.end(); iter++) { int64_t block_begin = *iter - (n_removed - 1); int64_t block_end = _n - n_removed; if(std::next(iter,1) != cols_to_remove.end()) { block_end = *std::next(iter,1) - n_removed; } for(int64_t i = block_begin; i < block_end; i++) { auto col_to_overwrite = dest.subcol(0, i, std::min(i + n_removed + 1, _m)); const auto col_to_copy = this->subcol(0, i + n_removed, std::min(i + n_removed + 1, _m)); col_to_overwrite.copy(col_to_copy); } n_removed++; } dest.enlarge_n(-cols_to_remove.size()); } void shift_trapezoid_up(int64_t dest_m_coord, int64_t dest_n_coord, int64_t nc, int64_t h, int64_t y_dist) { if(_rs == 1) { int64_t end_n = std::min(_n, dest_n_coord + nc); for(int64_t j = dest_n_coord; j < 
end_n; j++) { int64_t end_m = std::min(_m - y_dist, dest_m_coord + j + h); #pragma omp parallel for for(int64_t i = dest_m_coord; i < end_m; i++) { _values[i*_rs + j*_cs] = _values[(i+y_dist)*_rs + j*_cs]; } } } else { int64_t end_m = std::min(_m - y_dist, dest_m_coord + nc + h); int64_t end_n = std::min(_n, dest_n_coord + nc); #pragma omp parallel for for(int64_t i = dest_m_coord; i < end_m; i++) { int64_t start_n = std::min(i - dest_m_coord - h, (int64_t) 0); for(int64_t j = dest_n_coord + start_n; j < end_n; j++) { _values[i*_rs + j*_cs] = _values[(i + y_dist)*_rs + j*_cs]; } } } } void shift_trapezoid_left(int64_t dest_m_coord, int64_t dest_n_coord, int64_t nc, int64_t h, int64_t x_dist) { if(_rs == 1) { int64_t end_n = std::min(_n - x_dist, dest_n_coord + nc); for(int64_t j = dest_n_coord; j < end_n; j++) { int64_t end_m = std::min(_m, dest_m_coord + j + h); #pragma omp parallel for for(int64_t i = dest_m_coord; i < end_m; i++) { _values[i*_rs + j*_cs] = _values[i*_rs + (j + x_dist)*_cs]; } } } else { int64_t end_m = std::min(_m, dest_m_coord + nc + h); int64_t end_n = std::min(_n - x_dist, dest_n_coord + nc); #pragma omp parallel for for(int64_t i = dest_m_coord; i < end_m; i++) { int64_t start_n = std::min(i-dest_m_coord - h, (int64_t)0); for(int64_t j = dest_n_coord + start_n; j < end_n; j++) { _values[i*_rs + j*_cs] = _values[i*_rs + (j + x_dist)*_cs]; } } } } void shift_triangle_left(int64_t dest_m_coord, int64_t dest_n_coord, int64_t nc, int64_t x_dist) { DT* src = lea(dest_m_coord, dest_n_coord + x_dist); DT* dest = lea(dest_m_coord, dest_n_coord); if(_rs == 1) { int64_t N = std::min(nc, _n - x_dist - dest_n_coord); for(int64_t j = 0; j < N; j++) { int64_t M = std::min(j+1, _m - dest_m_coord); #pragma omp parallel for for(int64_t i = 0; i < M; i++) { dest[i*_rs + j*_cs] = src[i*_rs + j*_cs]; src[i*_rs + j*_cs] = 0.0; } } } else { int64_t M = std::min(nc, _m - dest_m_coord); #pragma omp parallel for for(int64_t i = 0; i < M; i++) { int64_t N = std::min(nc, _n - dest_n_coord - x_dist); for(int64_t j = i; j < N; j++) { dest[i*_rs + j*_cs] = src[i*_rs + j*_cs]; } } } } void shift_triangle_up(int64_t dest_m_coord, int64_t dest_n_coord, int64_t nc, int64_t y_dist) { DT* src = lea(dest_m_coord + y_dist, dest_n_coord); DT* dest = lea(dest_m_coord, dest_n_coord); if(_rs == 1) { int64_t N = std::min(nc, _n - dest_n_coord); #pragma omp parallel for for(int64_t j = 0; j < N; j++) { int64_t M = std::min(j+1, _m - dest_m_coord - y_dist); for(int64_t i = 0; i < M; i++) { dest[i*_rs + j*_cs] = src[i*_rs + j*_cs]; //if(i+y_dist >= M) src[i*_rs + j*_cs] = 0.0; src[i*_rs + j*_cs] = 0.0; } } } else { int64_t M = std::min(nc, _m - dest_m_coord - y_dist); for(int64_t i = 0; i < M; i++) { int64_t N = std::min(nc, _n - dest_n_coord); #pragma omp parallel for for(int64_t j = i; j < N; j++) { dest[i*_rs + j*_cs] = src[i*_rs + j*_cs]; } } } } void shift_dense_left(int64_t dest_m_coord, int64_t dest_n_coord, int64_t mc, int64_t nc, int64_t x_dist) { int64_t end_n = std::min(_n - x_dist, dest_n_coord + nc); int64_t end_m = std::min(_m, dest_m_coord + mc); if(_rs == 1) { for(int64_t j = dest_n_coord; j < end_n; j++) { #pragma omp parallel for for(int64_t i = dest_m_coord; i < end_m; i++) { _values[i*_rs + j*_cs] = _values[i*_rs + (j + x_dist)*_cs]; _values[i*_rs + (j + x_dist)*_cs] = 0; } } } else { #pragma omp parallel for for(int64_t i = dest_m_coord; i < end_m; i++) { for(int64_t j = dest_n_coord; j < end_n; j++) { _values[i*_rs + j*_cs] = _values[i*_rs + (j + x_dist)*_cs]; } } } } void 
shift_dense_up(int64_t dest_m_coord, int64_t dest_n_coord, int64_t mc, int64_t nc, int64_t y_dist) { int64_t end_n = std::min(_n, dest_n_coord + nc); int64_t end_m = std::min(_m - y_dist, dest_m_coord + mc); if(_rs == 1) { #pragma omp parallel for for(int64_t j = dest_n_coord; j < end_n; j++) { for(int64_t i = dest_m_coord; i < end_m; i++) { _values[i*_rs + j*_cs] = _values[(i+y_dist)*_rs + j*_cs]; _values[(i+ y_dist)*_rs + j*_cs] = 0; } } } else { for(int64_t i = dest_m_coord; i < end_m; i++) { #pragma omp parallel for for(int64_t j = dest_n_coord; j < end_n; j++) { _values[i*_rs + j*_cs] = _values[(i + y_dist)*_rs + j*_cs]; } } } } //Delete columns of the matrix and permute rows (to be annihilated) into V void remove_cols_permute_rows_in_place(const std::list<int64_t>& cols_to_remove, Matrix<DT>& V) { int64_t cols_removed = 1; for(auto j_iter = cols_to_remove.begin(); j_iter != cols_to_remove.end(); j_iter++) { //trap_begin and trap_end represent the start and end of the trapezoid after shifting it. int64_t source_j_begin = *j_iter + 1; if(source_j_begin == _n) break; int64_t source_j_end = _n; if(std::next(j_iter,1) != cols_to_remove.end()) { source_j_end = *std::next(j_iter,1); } int64_t dest_j_begin = source_j_begin - cols_removed; //Size of the triangular matrix along diagonal int64_t trap_n = source_j_end - source_j_begin; //Early exit conditions if(source_j_begin == source_j_end) { cols_removed++; continue; } //Copy blocks above diagonal, copy rows to annihilate int64_t rows_permuted = 0; int64_t source_i_begin = 0; for(auto i_iter = cols_to_remove.begin(); i_iter != std::next(j_iter,1); i_iter++) { int64_t dest_i_begin = source_i_begin - rows_permuted; //Copy the row to annihilate into the V matrix auto r = this->subrow(*i_iter, source_j_begin, trap_n); auto v = V.subrow(rows_permuted, dest_j_begin, trap_n); v.copy(r); //Shift block up and to the left this->shift_dense_left(source_i_begin, dest_j_begin, *i_iter - source_i_begin, trap_n, cols_removed); if(rows_permuted > 0) this->shift_dense_up(dest_i_begin, dest_j_begin, *i_iter - source_i_begin, trap_n, rows_permuted); source_i_begin = *i_iter + 1; rows_permuted++; } //Shift triangle along diagonal up and to the left this->shift_triangle_up(dest_j_begin, source_j_begin, trap_n, cols_removed); this->shift_triangle_left(dest_j_begin, dest_j_begin, trap_n, cols_removed); cols_removed++; } this->enlarge_n(-cols_to_remove.size()); this->enlarge_m(-cols_to_remove.size()); } void remove_cols_permute_rows_out_of_place(Matrix<DT>& dest, const std::list<int64_t>& cols_to_remove, Matrix<DT>& V) const { if(cols_to_remove.size() == 0) { dest.copy_upper_tri(*this); } //Copy initial triangle auto src_tri = this->submatrix(0, 0, cols_to_remove.front(), cols_to_remove.front()); auto dest_tri = dest.submatrix(0, 0, cols_to_remove.front(), cols_to_remove.front()); dest_tri.copy_upper_tri(src_tri); int64_t cols_removed = 1; for(auto j_iter = cols_to_remove.begin(); j_iter != cols_to_remove.end(); j_iter++) { int64_t source_j_begin = *j_iter + 1; if(source_j_begin == _n) break; int64_t source_j_end = _n; if(std::next(j_iter,1) != cols_to_remove.end()) { source_j_end = *std::next(j_iter,1); } int64_t dest_j_begin = source_j_begin - cols_removed; //Size of the triangular matrix along diagonal int64_t trap_n = source_j_end - source_j_begin; //Early exit conditions if(source_j_begin == source_j_end) { cols_removed++; continue; } //Copy blocks above diagonal, copy rows to annihilate int64_t rows_permuted = 0; int64_t source_i_begin = 0; for(auto i_iter = 
cols_to_remove.begin(); i_iter != std::next(j_iter,1); i_iter++) { int64_t dest_i_begin = source_i_begin - rows_permuted; //Copy the row to annihilate into the V matrix auto r = this->subrow(*i_iter, source_j_begin, trap_n); auto v = V.subrow(rows_permuted, dest_j_begin, trap_n); v.copy(r); //Copy block auto src_blk = this->submatrix(source_i_begin, source_j_begin, *i_iter - source_i_begin, trap_n); auto dest_blk = dest.submatrix(dest_i_begin, dest_j_begin, *i_iter - source_i_begin, trap_n); dest_blk.copy(src_blk); source_i_begin = *i_iter + 1; rows_permuted++; } //Copy triangle along diagonal auto src_tri = this->submatrix(source_j_begin, source_j_begin, trap_n, trap_n); auto dest_tri = dest.submatrix(dest_j_begin, dest_j_begin, trap_n, trap_n); dest_tri.copy_upper_tri(src_tri); cols_removed++; } dest.enlarge_n(-cols_to_remove.size()); dest.enlarge_m(-cols_to_remove.size()); } //Remove columns and rows beforehand, and use tpqr to annihilate rows, task parallel version //ws must be at least nb by n void remove_cols_incremental_qr_tasks_kressner(Matrix<DT>& dest, const std::list<int64_t>& cols_to_remove, Matrix<DT>& T, Matrix<DT>& V, int64_t task_size, int64_t nb, Matrix<DT>& ws) const { int64_t start, end; start = rdtsc(); //Delete columns and permute rows into the V matrix this->remove_cols_permute_rows_out_of_place(dest, cols_to_remove, V); //Partition the matrix according to the positions of the columns to be removed int64_t n_removed = 1; for(auto iter = cols_to_remove.begin(); iter != cols_to_remove.end(); iter++) { //trap_begin and trap_end represent the start and end of the trapezoid after shifting it. int64_t trap_begin = *iter + 1 - n_removed; int64_t trap_end = dest.width(); if(std::next(iter,1) != cols_to_remove.end()) trap_end = *std::next(iter,1) - n_removed; int64_t trap_n = trap_end - trap_begin; //Early exit conditions if(trap_begin == trap_end) { n_removed++; continue; } #pragma omp parallel #pragma omp single nowait { for(int64_t i = 0; i < trap_n; i += task_size) { int64_t block_m = std::min(task_size, trap_n - i); int64_t block_begin = trap_begin + i; //Factorization #pragma omp task depend(inout: i) { auto V1 = V.submatrix(0, block_begin, n_removed, block_m); auto T1 = T.submatrix(0, block_begin, nb, block_m); auto R11 = dest.submatrix(block_begin, block_begin, block_m, block_m); auto ws1 = ws.submatrix(0, block_begin, nb, block_m); R11.tpqr(V1, T1, 0, nb); } //Apply Q to the rest of the matrix for(int64_t j = block_begin + block_m; j < dest.width(); j += task_size) { int64_t block_n = std::min(task_size, dest.width() - j); #pragma omp task depend(in:i) //depend(inout:j) { auto V1 = V.submatrix(0, block_begin, n_removed, block_m); auto T1 = T.submatrix(0, block_begin, nb, block_m); auto R12 = dest.submatrix(block_begin, j, block_m, block_n); auto V2 = V.submatrix(0, j, n_removed, block_n); auto ws2 = ws.submatrix(0, j, ws.height(), block_n); V1.apply_tpq(R12, V2, T1, 0, nb, ws2); } } } } n_removed++; } end = rdtsc(); end++; PerfLog::get().log_total("REMOVE COLS QR BYTES", 2 * _m * _n); PerfLog::get().log_total("REMOVE COLS QR TIME", end - start); } void remove_cols_incremental_qr_kressner(Matrix<DT>& dest, const std::list<int64_t>& cols_to_remove, Matrix<DT>& T, Matrix<DT>& Vin, int64_t nb, Matrix<DT>& ws) const { int64_t start, end; start = rdtsc(); Matrix<DT> V = Vin.submatrix(0,0,Vin.height(), this->width() - cols_to_remove.size()); //Delete columns and permute rows into the V matrix this->remove_cols_permute_rows_out_of_place(dest, cols_to_remove, V); //First 
partition the matrix according to the positions of the columns to be removed int64_t n_removed = 1; for(auto iter = cols_to_remove.begin(); iter != cols_to_remove.end(); iter++) { //trap_begin and trap_end represent the start and end of the trapezoid after shifting it. int64_t trap_begin = *iter + 1 - n_removed; int64_t trap_end = dest.width(); if(std::next(iter,1) != cols_to_remove.end()) trap_end = *std::next(iter,1) - n_removed; int64_t trap_n = trap_end - trap_begin; //Early exit conditions if(trap_begin == trap_end) { n_removed++; continue; } //TPQR along the diagonal auto V1 = V.submatrix(0, trap_begin, n_removed, trap_n); auto R11 = dest.submatrix(trap_begin, trap_begin, trap_n, trap_n); auto T1 = T.submatrix(0, trap_begin, T.height(), trap_n); R11.tpqr(V1, T1, 0, nb); //Apply Q to the rest of matrix int64_t trail_begin = trap_end; if (trail_begin < dest.width()) { auto R12 = dest.submatrix(trap_begin, trail_begin, trap_n, dest.width() - trail_begin); auto V2 = V.submatrix(0, trail_begin, n_removed, dest.width() - trail_begin); V1.apply_tpq(R12, V2, T1, 0, nb, ws); } n_removed++; } end = rdtsc(); PerfLog::get().log_total("REMOVE COLS QR BYTES", 2 * _m * _n); PerfLog::get().log_total("REMOVE COLS QR TIME", end - start); } void remove_cols_incremental_qr_householder(const std::list<int64_t>& cols_to_remove, Vector<DT>& t) { int64_t start, end; start = rdtsc(); //First remove the columns. this->remove_cols_trap(cols_to_remove); //Second pass. //Use trapezoidal QR factorization to clean up elements below the diagonal //Partition the matrix by panels based on the columns there were removed int64_t n_removed = 1; for(auto iter = cols_to_remove.begin(); iter != cols_to_remove.end(); iter++) { int64_t block_begin = *iter - (n_removed - 1); if(block_begin == _n) break; int64_t block_end = _n; if(std::next(iter,1) != cols_to_remove.end()) { block_end = *std::next(iter,1) - n_removed; } int64_t m = block_end - block_begin + n_removed; int64_t n = block_end - block_begin; auto R11 = this->submatrix(block_begin, block_begin, m, n); auto t1 = t.subvector(block_begin, n); R11.trap_qr(t1, n_removed + 1); if(block_end < _n) { auto R12 = this->submatrix(block_begin, block_end, m, _n - block_end); R11.apply_trap_q(R12, t1, n_removed + 1); } n_removed++; } this->enlarge_m(-cols_to_remove.size()); end = rdtsc(); PerfLog::get().log_total("REMOVE COLS QR BYTES", 2 * _m * _n); PerfLog::get().log_total("REMOVE COLS QR TIME", end - start); } //Use givens rotations to annihilate the element below each diagonal element void annihilate_subdiag_givens(Matrix<DT>& T) { for(int64_t i = 0; i < _n && i < _m-1; i++) { //1. figure out Givens rotation to annihilate element below diagonal. rotg(lea(i,i), lea(i+1,i), T.lea(0,i), T.lea(1,i)); //2. Trailing update of Givens rotation if(i+1 < _n) rot(_n-(i+1), lea(i,i+1), _cs, lea(i+1,i+1), _cs, T(0,i), T(1,i)); } } void apply_givens_rots(Matrix<DT>& T) { for(int64_t i = 0; i < _m-1; i++) { rot(_n, lea(i,0), _cs, lea(i+1,0), _cs, T(0,i), T(1,i)); } } void remove_column_iqr_givens(int64_t column, Matrix<DT>& T, int64_t nb) { //First shift dense block above diagonal and to the right of the column removed //to the left by one. 
for(int i = 0; i < _m; i++) (*this)(i,column) = 0; shift_dense_left(0, column, column, _n-column, 1); //Proceed along the diagonal by blocks for(int64_t i = column; i+1 < _n; i += nb) { //Partition the matrix auto R11 = this->submatrix(i, i+1, nb+1, nb); auto T1 = T.submatrix(0, i+1, 2, nb+1); //Annihilate subdiagonal elements and shift left R11.annihilate_subdiag_givens(T1); shift_triangle_left(i, i, nb, 1); //Trailing update for(int64_t j = i + nb; j+1 < _n; j += nb) { auto R12 = this->submatrix(i, j+1, nb+1, nb); R12.apply_givens_rots(T1); shift_dense_left(i, j, nb, nb, 1); } } this->enlarge_m(-1); this->enlarge_n(-1); } }; template<> void Matrix<double>::mvm(double alpha, const Vector<double>& x, double beta, Vector<double>& y) const; template<> void Matrix<double>::trsv(CBLAS_UPLO uplo, Vector<double>& x) const; template<> void Matrix<double>::trsm(CBLAS_UPLO uplo, CBLAS_SIDE side, Matrix<double>& X) const; template<> void Matrix<double>::mmm(double alpha, const Matrix<double>& A, const Matrix<double>& B, double beta); template<> void Matrix<double>::syrk(CBLAS_UPLO uplo, double alpha, const Matrix<double>& A, double beta); template<> void Matrix<double>::qr(Vector<double>& t); template<> void Matrix<double>::chol(char uplo); template<> void Matrix<float>::chol(char uplo); template<> void Matrix<double>::tpqr(Matrix<double>& B, Matrix<double>& T, int64_t l_in, int64_t nb_in); template<> void Matrix<double>::apply_tpq(Matrix<double>& A, Matrix<double>& B, const Matrix<double>& T, int64_t l_in, int64_t nb_in, Matrix<double>& ws) const; template<> void Matrix<float>::mvm(float alpha, const Vector<float>& x, float beta, Vector<float>& y) const; template<> void Matrix<float>::trsv(CBLAS_UPLO uplo, Vector<float>& x) const; template<> void Matrix<float>::trsm(CBLAS_UPLO uplo, CBLAS_SIDE side, Matrix<float>& X) const; template<> void Matrix<float>::mmm(float alpha, const Matrix<float>& A, const Matrix<float>& B, float beta); template<> void Matrix<float>::syrk(CBLAS_UPLO uplo, float alpha, const Matrix<float>& A, float beta); template<> void Matrix<float>::qr(Vector<float>& t); //Use tpqrt to annihilate rectangle above the triangle. //Then stores the reflectors in that block //TODO: handle row-major template<> void Matrix<float>::tpqr(Matrix<float>& B, Matrix<float>& T, int64_t l_in, int64_t nb_in); template<> void Matrix<float>::apply_tpq(Matrix<float>& A, Matrix<float>& B, const Matrix<float>& T, int64_t l_in, int64_t nb_in, Matrix<float>& ws) const; #endif
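A short usage sketch for the Matrix<DT> class above; it is not part of the original header. It only uses members defined in this file, and it assumes the surrounding project layout (matrix.h on the include path, and MKL headers available since the header includes mkl.h); view semantics of submatrix() mean B below aliases A's storage.

#include "matrix.h"
#include <iostream>

int main()
{
    Matrix<double> A(6, 6);      // 6x6 column-major matrix in a 4 KiB-aligned buffer
    A.set_all(0.0);
    A.set_diagonal(2.0);

    // submatrix() returns a non-owning view: writing through B updates A(0..2, 0..2).
    auto B = A.submatrix(0, 0, 3, 3);
    B.set_all(1.0);

    std::cout << "||A||_F = " << A.fro_norm() << std::endl;
    A.transposed().print("A transposed");   // lightweight transpose: strides are swapped, no copy
    return 0;
}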
tensor_cpu-inl.h
/*! * Copyright (c) 2014 by Contributors * \file tensor_cpu-inl.h * \brief implementation of CPU host code * \author Bing Xu, Tianqi Chen */ #ifndef MSHADOW_TENSOR_CPU_INL_H_ #define MSHADOW_TENSOR_CPU_INL_H_ #include <cstring> #include <functional> #include <utility> #include <vector> #include "./base.h" #include "./tensor.h" #include "./packet-inl.h" #include "./dot_engine-inl.h" namespace mshadow { template<> inline void InitTensorEngine<cpu>(int dev_id) { } template<> inline void ShutdownTensorEngine<cpu>(void) { } template<> inline void SetDevice<cpu>(int devid) { } template<> inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle, bool create_dnn_handle, int dev_id) { return new Stream<cpu>(); } template<> inline void DeleteStream<cpu>(Stream<cpu> *stream) { delete stream; } template<int ndim> inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*) os << '('; for (int i = 0; i < ndim; ++i) { if (i != 0) os << ','; os << shape[i]; } // python style tuple if (ndim == 1) os << ','; os << ')'; return os; } template<typename xpu> inline void *AllocHost_(size_t size); template<typename xpu> inline void FreeHost_(void * dptr); #ifdef __CUDACC__ template<> inline void *AllocHost_<gpu>(size_t size) { void *dptr; MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable)); return dptr; } template<> inline void FreeHost_<gpu>(void *dptr) { MSHADOW_CUDA_CALL(cudaFreeHost(dptr)); } #endif template<> inline void *AllocHost_<cpu>(size_t size) { size_t pitch; return packet::AlignedMallocPitch(&pitch, size, 1); } template<> inline void FreeHost_<cpu>(void *dptr) { packet::AlignedFree(dptr); } template<typename xpu, int dim, typename DType> inline void AllocHost(Tensor<cpu, dim, DType> *obj) { obj->stride_ = obj->size(dim - 1); CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost"; void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType)); obj->dptr_ = reinterpret_cast<DType*>(dptr); } template<typename xpu, int dim, typename DType> inline void FreeHost(Tensor<cpu, dim, DType> *obj) { if (obj->dptr_ == NULL) { LOG(FATAL) << "FreeHost:: double free"; } FreeHost_<xpu>(obj->dptr_); obj->dptr_ = NULL; } template<int dim, typename DType> inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) { size_t pitch; void *dptr; if (pad) { dptr = packet::AlignedMallocPitch (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]); obj->stride_ = static_cast<index_t>(pitch / sizeof(DType)); } else { obj->stride_ = obj->size(dim - 1); dptr = packet::AlignedMallocPitch (&pitch, obj->shape_.Size() * sizeof(DType), 1); } obj->dptr_ = reinterpret_cast<DType*>(dptr); } template<typename Device, typename DType, int dim> inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) { Tensor<Device, dim, DType> obj(shape); obj.stream_ = stream_; AllocSpace(&obj, pad); MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv)); return obj; } template<int dim, typename DType> inline void FreeSpace(Tensor<cpu, dim, DType> *obj) { packet::AlignedFree(obj->dptr_); obj->dptr_ = NULL; } template<int dim, typename DType> inline void Copy(Tensor<cpu, dim, DType> _dst, const Tensor<cpu, dim, DType> &_src, Stream<cpu> *stream) { CHECK_EQ(_dst.shape_, _src.shape_) << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_; if (_dst.CheckContiguous() && _src.CheckContiguous()) { memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size()); } else { Tensor<cpu, 2, DType> dst = _dst.FlatTo2D(); Tensor<cpu, 
2, DType> src = _src.FlatTo2D(); for (index_t y = 0; y < dst.size(0); ++y) { memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1)); } } } template<typename Saver, typename R, int dim, typename DType, typename E> inline void MapPlan(TRValue<R, cpu, dim, DType> *dst, const expr::Plan<E, DType> &plan) { Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D(); expr::Plan<R, DType> dplan = expr::MakePlan(dst->self()); #if (MSHADOW_USE_CUDA == 0) #pragma omp parallel for #endif // temp remove openmp, as default setting throttles CPU for (openmp_index_t y = 0; y < shape[0]; ++y) { for (index_t x = 0; x < shape[1]; ++x) { // trust your compiler! -_- they will optimize it Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x)); } } } // code to handle SSE optimization template<bool pass_check, typename Saver, typename R, int dim, typename DType, typename E, int etype> struct MapExpCPUEngine { inline static void Map(TRValue<R, cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { MapPlan<Saver>(dst, MakePlan(exp.self())); } }; template<typename SV, int dim, typename DType, typename E, int etype> struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>, dim, DType, E, etype> { inline static void Map(Tensor<cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) && expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) { expr::MapPacketPlan<SV>(dst->self(), expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self())); } else { MapPlan<SV>(dst, MakePlan(exp.self())); } } }; template<typename Saver, typename R, int dim, typename DType, typename E, int etype> inline void MapExp(TRValue<R, cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass> ::Error_All_Tensor_in_Exp_Must_Have_Same_Type(); Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self()); Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self()); CHECK(eshape[0] == 0 || eshape == dshape) << "Assignment: Shape of Tensors are not consistent with target, " << "eshape: " << eshape << " dshape:" << dshape; MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass, Saver, R, dim, DType, E, etype> ::Map(dst->ptrself(), exp); } template<typename Saver, typename Reducer, typename R, typename DType, typename E, int etype> inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst, const expr::Exp<E, DType, etype> &exp, DType scale) { expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass> ::Error_TypeCheck_Not_Pass_For_Reduce_Exp(); Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E> ::Check(exp.self()).FlatTo2D(); Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self()); CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match"; CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor"; // execution expr::Plan<R, DType> dplan = MakePlan(dst->self()); expr::Plan<E, DType> splan = MakePlan(exp.self()); #if (MSHADOW_USE_CUDA == 0) #pragma omp parallel for #endif for (openmp_index_t x = 0; x < eshape[1]; ++x) { DType res = splan.Eval(0, x); for (index_t y = 1; y < eshape[0]; ++y) { Reducer::Reduce(res, splan.Eval(y, x)); } Saver::template Save<DType>(dplan.REval(0, x), res * scale); } } template<typename Saver, typename Reducer, int dimkeep, typename R, typename DType, typename E, int etype> inline void MapReduceKeepHighDim(TRValue<R, 
cpu, 1, DType> *dst, const expr::Exp<E, DType, etype> &exp, DType scale) { expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass> ::Error_TypeCheck_Not_Pass_For_Reduce_Exp(); typedef Shape<expr::ExpInfo<E>::kDim> EShape; EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E> ::Check(exp.self()); Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self()); CHECK_EQ(eshape[dimkeep], dshape[0]) << "MapReduceKeepHighDim::reduction dimension do not match"; // use equvalent form Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep), eshape[dimkeep], eshape.ProdShape(dimkeep + 1, EShape::kSubdim), eshape[EShape::kSubdim]); // execution expr::Plan<R, DType> dplan = MakePlan(dst->self()); expr::Plan<E, DType> splan = MakePlan(exp.self()); #if (MSHADOW_USE_CUDA == 0) #pragma omp parallel for #endif for (openmp_index_t c = 0; c < pshape[1]; ++c) { DType res; Reducer::SetInitValue(res); for (index_t n = 0; n < pshape[0]; ++n) { DType tres; Reducer::SetInitValue(tres); for (index_t y = 0; y < pshape[2]; ++y) { for (index_t x = 0; x < pshape[3]; ++x) { Reducer::Reduce(tres, splan.Eval((n * pshape[1] + c) * pshape[2] + y, x)); } } Reducer::Reduce(res, tres); } Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale)); } } template<typename DType> inline void Softmax(Tensor<cpu, 1, DType> dst, const Tensor<cpu, 1, DType> &energy) { DType mmax = energy[0]; for (index_t x = 1; x < dst.size(0); ++x) { if (mmax < energy[x]) mmax = energy[x]; } DType sum = DType(0.0f); for (index_t x = 0; x < dst.size(0); ++x) { dst[x] = std::exp(energy[x] - mmax); sum += dst[x]; } for (index_t x = 0; x < dst.size(0); ++x) { dst[x] /= sum; } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label) { #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const index_t k = static_cast<int>(label[y]); for (index_t x = 0; x < dst.size(1); ++x) { if (x == k) { dst[y][k] = src[y][k] - 1.0f; } else { dst[y][x] = src[y][x]; } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label, const DType &ignore_label) { #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const index_t k = static_cast<int>(label[y]); for (index_t x = 0; x < dst.size(1); ++x) { if (static_cast<int>(ignore_label) == k) { dst[y][x] = 0.0f; } else { if (x == k) { dst[y][k] = src[y][k] - 1.0f; } else { dst[y][x] = src[y][x]; } } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label) { #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const index_t k = static_cast<int>(label[y][n]); for (index_t x = 0; x < dst.size(1); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f; } else { dst[y][x][n] = src[y][x][n]; } } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label, const DType &ignore_label) { #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const index_t k = static_cast<int>(label[y][n]); if (k == static_cast<int>(ignore_label)) { for (index_t x = 0; x < dst.size(1); ++x) { dst[y][x][n] = DType(0.0f); } } else { for (index_t x = 0; x < dst.size(1); ++x) { if (x == k) { 
dst[y][k][n] = src[y][k][n] - 1.0f; } else { dst[y][x][n] = src[y][x][n]; } } } } } } template<typename DType> inline void Softmax(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &energy) { CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch"; #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { Softmax(dst[y], energy[y]); } } template<typename DType> inline void Softmax(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &energy) { CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch"; #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { for (index_t n = 0; n < dst.size(2); ++n) { DType mmax = energy[y][0][n]; for (index_t x = 1; x < dst.size(1); ++x) { if (mmax < energy[y][x][n]) mmax = energy[y][x][n]; } DType sum = DType(0.0f); for (index_t x = 0; x < dst.size(1); ++x) { dst[y][x][n] = std::exp(energy[y][x][n] - mmax); sum += dst[y][x][n]; } for (index_t x = 0; x < dst.size(1); ++x) { dst[y][x][n] /= sum; } } } } template<typename IndexType, typename DType> inline void AddTakeGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { const int K = dst.shape_[0]; for (index_t y = 0; y < index.size(0); ++y) { int j = index[y]; if (j <= 0) j = 0; else if (j >= K) j = K - 1; dst[j] += src[y]; } } template<typename IndexType, typename DType> inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& sorted, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { for (index_t y = 0; y < sorted.size(0); ++y) { dst[sorted[y]] += src[index[y]]; } } template<typename IndexType, typename DType> inline void IndexFill(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { for (index_t y = 0; y < index.size(0); ++y) { for (index_t j = 0; j < src.size(1); j++) { dst[index[y]][j] = src[y][j]; } } } template<typename KDType, typename VDType> inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values, bool is_ascend) { CHECK_EQ(keys.CheckContiguous(), true); CHECK_EQ(values.CheckContiguous(), true); CHECK_EQ(keys.size(0), values.size(0)) << "The sizes of key/value are not equal! 
keys_size: " << keys.size(0) << "values_size: " << values.size(0); std::vector<size_t> idx(keys.size(0)); std::vector<KDType> keys_vec(keys.size(0)); std::vector<VDType> values_vec(values.size(0)); for (int i = 0; i < keys.size(0); i++) { idx[i] = i; keys_vec[i] = keys[i]; values_vec[i] = values[i]; } if (is_ascend) { std::stable_sort(idx.begin(), idx.end(), [&keys_vec](size_t i1, size_t i2) {return keys_vec[i1] < keys_vec[i2]; }); } else { std::stable_sort(idx.begin(), idx.end(), [&keys_vec](size_t i1, size_t i2) {return keys_vec[i1] > keys_vec[i2]; }); } for (index_t i = 0; i < values.size(0); i++) { keys[i] = keys_vec[idx[i]]; values[i] = values_vec[idx[i]]; } } template<typename Device, typename VDType, typename SDType> inline void VectorizedSort(Tensor<Device, 1, VDType> values, Tensor<Device, 1, SDType> segments) { // We can sort each segments using two stable sorts SortByKey(values, segments, true); SortByKey(segments, values, true); } // blas related template<typename Device, typename DType> inline void VectorDot(Tensor<Device, 1, DType> dst, const Tensor<Device, 1, DType> &lhs, const Tensor<Device, 1, DType> &rhs) { CHECK_EQ(lhs.size(0), rhs.size(0)) << "VectorDot: Shape mismatch"; CHECK_EQ(dst.size(0), 1U) << "VectorDot: expect dst to be scalar"; expr::BLASEngine<Device, DType>::SetStream(lhs.stream_); mshadow::expr::BLASEngine<Device, DType>::dot( lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_); } template<bool transpose_left, bool transpose_right, typename Device, typename DType> inline void BatchGEMM(Tensor<Device, 3, DType> dst, const Tensor<Device, 3, DType> &lhs, const Tensor<Device, 3, DType> &rhs, DType alpha, DType beta, Tensor<Device, 1, DType*> workspace) { index_t batch_size = dst.shape_[0]; expr::BLASEngine<Device, DType>::SetStream(dst.stream_); Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1]) : lhs.shape_; Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1]) : rhs.shape_; CHECK_EQ(dst.CheckContiguous(), true); CHECK_EQ(lhs.CheckContiguous(), true); CHECK_EQ(rhs.CheckContiguous(), true); CHECK(sleft[0] == batch_size && sright[0] == batch_size) << "BatchGEMM: batchsize must be equal." << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1]) << "BatchGEMM: matrix shape mismatch" << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(workspace.size(0) >= 3 * batch_size) << "Workspace Size must be bigger than " << 3 * batch_size; CHECK_EQ(workspace.CheckContiguous(), true); // use column major argument to compatible with most BLAS expr::BLASEngine<Device, DType>::batched_gemm (dst.stream_, transpose_right, transpose_left, transpose_right ? rhs.size(1) : rhs.size(2), transpose_left ? lhs.size(2) : lhs.size(1), transpose_right ? rhs.size(2) : rhs.size(1), alpha, rhs.dptr_, rhs.stride_, lhs.dptr_, lhs.stride_, beta, dst.dptr_, dst.stride_, batch_size, workspace.dptr_); } } // namespace mshadow #endif // MSHADOW_TENSOR_CPU_INL_H_
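A small hypothetical driver for the CPU Softmax path above, not part of the original header. It assumes the mshadow headers are on the include path and that MSHADOW_USE_CUDA is 0; the Tensor(dptr, shape) wrapping constructor and Shape2 come from mshadow's tensor.h.

#include <stdio.h>
#include "mshadow/tensor.h"

int main()
{
    using namespace mshadow;
    InitTensorEngine<cpu>(0);

    float energy_buf[2][3] = {{1.0f, 2.0f, 3.0f}, {0.0f, 0.0f, 0.0f}};
    float prob_buf[2][3];

    // Wrap the stack arrays as contiguous 2-D tensors; stride_ equals size(1) here.
    Tensor<cpu, 2, float> energy(&energy_buf[0][0], Shape2(2, 3));
    Tensor<cpu, 2, float> prob(&prob_buf[0][0], Shape2(2, 3));
    energy.stride_ = 3;
    prob.stride_ = 3;

    Softmax(prob, energy);   // row-wise softmax, as implemented above

    for (int y = 0; y < 2; ++y) {
        for (int x = 0; x < 3; ++x) printf("%.4f ", prob_buf[y][x]);
        printf("\n");
    }
    ShutdownTensorEngine<cpu>();
    return 0;
}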
pi-v2.c
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #endif int main(int argc, char *argv[]) { double x, sum=0.0, pi=0.0; #if !_DEBUG_ double start,end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0/(double) num_steps; #if !_DEBUG_ start= omp_get_wtime(); #endif /* do computation -- using all available threads */ // WARNING : incorrect code #pragma omp parallel private(i, x) { #if _DEBUG_ int id = omp_get_thread_num(); #endif for (i=0; i < num_steps; ++i) { x = (i+0.5)*step; sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #endif } } pi = step * sum; #if !_DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end-start); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
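The parallel region above is deliberately left incorrect (see the "WARNING : incorrect code" comment): every thread executes the full iteration range and the unsynchronized updates to the shared sum are a data race. One common repair, shown here only as a sketch rather than the canonical solution for this exercise, is an OpenMP worksharing loop with a reduction on sum.

/* Sketch of a corrected version of the core loop: iterations are divided among
 * threads and per-thread partial sums are combined by the reduction clause.
 * Compile with e.g. cc -fopenmp pi-fixed.c */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "Usage: pi <num_steps> (try 1000000000)\n");
        return EXIT_FAILURE;
    }
    int num_steps = atoi(argv[1]);
    double step = 1.0 / (double) num_steps;
    double sum = 0.0;
    double x;
    int i;

    double start = omp_get_wtime();
    #pragma omp parallel for private(x) reduction(+:sum)
    for (i = 0; i < num_steps; ++i) {
        x = (i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }
    double pi = step * sum;
    double end = omp_get_wtime();

    printf("Wall clock execution time = %.9f seconds\n", end - start);
    printf("Value of pi = %12.10f\n", pi);
    return EXIT_SUCCESS;
}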
fractional_step_strategy.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #ifndef KRATOS_FRACTIONAL_STEP_STRATEGY #define KRATOS_FRACTIONAL_STEP_STRATEGY // System includes // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/cfd_variables.h" #include "processes/process.h" #include "solving_strategies/strategies/solving_strategy.h" #include "utilities/variable_utils.h" // Application includes #include "custom_utilities/solver_settings.h" namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @brief Fractional-step strategy for incompressible Navier-Stokes formulation * This strategy implements a splitting scheme for the incompressible Navier-Stokes equations. * It is intended to be used in combination with the FractionalStep element in the FluidDynamicsApplicatoin. * The fractional step index, which is stored in the ProcessInfo, takes the values * 1 : Momentum step (calculate fractional step velocity) * 2-3 : Unused (reserved for componentwise calculation of frac step velocity) * 4 : Pressure step * 5 : Computation of projections * 6 : End of step velocity * @tparam TSparseSpace Sparse space template type * @tparam TDenseSpace Dense space template type * @tparam TLinearSolver Linear solver template type */ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class FractionalStepStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ /// Counted pointer of FractionalStepStrategy KRATOS_CLASS_POINTER_DEFINITION(FractionalStepStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef SolverSettings<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ FractionalStepStrategy(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector): BaseType(rModelPart,false), mCalculateReactionsFlag(false), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { KRATOS_WARNING("FractionalStepStrategy") << "This constructor is deprecated. Use the one with the \'CalculateReactionsFlag\' instead." << std::endl; InitializeStrategy(rSolverConfig,PredictorCorrector); } FractionalStepStrategy(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector, const Kratos::Variable<int>& PeriodicVar): BaseType(rModelPart,false), mCalculateReactionsFlag(false), mrPeriodicIdVar(PeriodicVar) { KRATOS_WARNING("FractionalStepStrategy") << "This constructor is deprecated. Use the one with the \'CalculateReactionsFlag\' instead." 
<< std::endl; InitializeStrategy(rSolverConfig,PredictorCorrector); } FractionalStepStrategy( ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector, bool CalculateReactionsFlag) : BaseType(rModelPart,false) , mCalculateReactionsFlag(CalculateReactionsFlag) , mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { InitializeStrategy(rSolverConfig,PredictorCorrector); } FractionalStepStrategy( ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector, bool CalculateReactionsFlag, const Kratos::Variable<int>& PeriodicVar) : BaseType(rModelPart,false) , mCalculateReactionsFlag(CalculateReactionsFlag) , mrPeriodicIdVar(PeriodicVar) { InitializeStrategy(rSolverConfig,PredictorCorrector); } /// Destructor. ~FractionalStepStrategy() override{} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void Initialize() override { // Set up nodes to use slip conditions if needed. if (mUseSlipConditions) { auto& r_model_part = BaseType::GetModelPart(); const int n_conds = r_model_part.NumberOfConditions(); #pragma omp parallel for for (int i_cond = 0; i_cond < n_conds; ++i_cond) { auto it_cond = r_model_part.ConditionsBegin() + i_cond; if (it_cond->Is(SLIP)) { auto& r_geom = it_cond->GetGeometry(); for (auto& r_node : r_geom) { r_node.SetLock(); r_node.Set(SLIP, true); r_node.UnSetLock(); } } } } } int Check() override { KRATOS_TRY; // Base strategy check int ierr = BaseType::Check(); if (ierr != 0) { return ierr; } // Check time order and buffer size const auto& r_model_part = BaseType::GetModelPart(); KRATOS_ERROR_IF(mTimeOrder == 2 && r_model_part.GetBufferSize() < 3) << "Buffer size too small for fractional step strategy (BDF2), needed 3, got " << r_model_part.GetBufferSize() << std::endl; KRATOS_ERROR_IF(mTimeOrder == 1 && r_model_part.GetBufferSize() < 2) << "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got " << r_model_part.GetBufferSize() << std::endl; // Check elements and conditions const auto &r_current_process_info = r_model_part.GetProcessInfo(); for (const auto& r_element : r_model_part.Elements()) { ierr = r_element.Check(r_current_process_info); if (ierr != 0) { break; } } for (const auto& r_condition : r_model_part.Conditions()) { ierr = r_condition.Check(r_current_process_info); if (ierr != 0) { break; } } return ierr; KRATOS_CATCH(""); } void InitializeSolutionStep() override { // Initialize BDF2 coefficients SetTimeCoefficients(); } bool SolveSolutionStep() override { bool converged = false; if (mPredictorCorrector) { const unsigned int echo_level = BaseType::GetEchoLevel(); // Iterative solution for pressure for (unsigned int it = 0; it < mMaxPressureIter; ++it) { KRATOS_INFO_IF("FractionalStepStrategy", echo_level > 1) << "Pressure iteration " << it << std::endl; const auto convergence_output = this->SolveStep(); converged = this->CheckPressureConvergence(std::get<1>(convergence_output)); if (converged) { KRATOS_INFO_IF("FractionalStepStrategy", echo_level > 0) << "Predictor-corrector converged in " << it + 1 << " iterations." << std::endl; break; } } KRATOS_WARNING_IF("FractionalStepStrategy", !converged && echo_level > 0) << "Predictor-correctior iterations did not converge." << std::endl; } else { // Solve for fractional step velocity, then update pressure once const auto convergence_output = this->SolveStep(); // If not doing predictor corrector iterations, norm_dp will // typically be "large" since we are not iterating on pressure. 
// It makes no sense to report that the iteration didn't converge // based on this. Hence, what we report is the convergence of the // fractional step velocity. converged = std::get<0>(convergence_output); } // Calculate reactions if (mCalculateReactionsFlag) { CalculateReactions(); } return converged; } void FinalizeSolutionStep() override { if (mReformDofSet) { this->Clear(); } } //TODO: Move to private section as soon as we remove the Python exposure /** * @brief Calculates the reactions * This methods calculates the reactions of the momentum equation. * These are computed as minus the RHS and saved in the REACTION variable */ virtual void CalculateReactions() { auto &r_model_part = BaseType::GetModelPart(); auto &r_process_info = r_model_part.GetProcessInfo(); const int n_elems = r_model_part.NumberOfElements(); // Set fractional step index to the momentum equation step const int original_step = r_process_info[FRACTIONAL_STEP]; r_process_info.SetValue(FRACTIONAL_STEP, 1); // Allocate and initialize values for REACTION calculation LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; const auto &r_const_process_info = r_process_info; VariableUtils().SetHistoricalVariableToZero(REACTION, r_model_part.Nodes()); #pragma omp parallel for private(RHS_Contribution, LHS_Contribution) for (int i_elem = 0; i_elem < n_elems; ++i_elem) { // Build local system auto it_elem = r_model_part.ElementsBegin() + i_elem; it_elem->CalculateLocalSystem( LHS_Contribution, RHS_Contribution, r_const_process_info); // Accumulate minus the RHS as the reaction unsigned int index = 0; auto& r_geom = it_elem->GetGeometry(); const unsigned int n_nodes = r_geom.PointsNumber(); for (unsigned int i = 0; i < n_nodes; ++i) { r_geom[i].SetLock(); auto& r_reaction = r_geom[i].FastGetSolutionStepValue(REACTION); for (unsigned int d = 0; d < mDomainSize; ++d) { r_reaction[d] -= RHS_Contribution[index++]; } r_geom[i].UnSetLock(); } } // Synchronize the local REACTION values r_model_part.GetCommunicator().AssembleCurrentData(REACTION); // Reset original fractional step index r_process_info.SetValue(FRACTIONAL_STEP, original_step); } virtual void AddIterationStep(Process::Pointer pNewStep) { mExtraIterationSteps.push_back(pNewStep); } virtual void ClearExtraIterationSteps() { mExtraIterationSteps.clear(); } void Clear() override { mpMomentumStrategy->Clear(); mpPressureStrategy->Clear(); } ///@} ///@name Access ///@{ void SetEchoLevel(int Level) override { BaseType::SetEchoLevel(Level); int StrategyLevel = Level > 0 ? Level - 1 : 0; mpMomentumStrategy->SetEchoLevel(StrategyLevel); mpPressureStrategy->SetEchoLevel(StrategyLevel); } /** * @brief This method sets the flag mCalculateReactionsFlag * @param CalculateReactionsFlag The flag that tells if the reactions are computed */ void SetCalculateReactionsFlag(bool CalculateReactionsFlag) { mCalculateReactionsFlag = CalculateReactionsFlag; } /** * @brief This method returns the flag mCalculateReactionsFlag * @return The flag that tells if the reactions are computed */ bool GetCalculateReactionsFlag() { return mCalculateReactionsFlag; } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "FractionalStepStrategy" ; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ double mVelocityTolerance; double mPressureTolerance; unsigned int mMaxVelocityIter; unsigned int mMaxPressureIter; unsigned int mDomainSize; unsigned int mTimeOrder; bool mPredictorCorrector; bool mUseSlipConditions; bool mReformDofSet; bool mCalculateReactionsFlag; /// Scheme for the solution of the momentum equation StrategyPointerType mpMomentumStrategy; /// Scheme for the solution of the mass equation StrategyPointerType mpPressureStrategy; std::vector< Process::Pointer > mExtraIterationSteps; const Kratos::Variable<int>& mrPeriodicIdVar; ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief Set the Time Coefficients object * Calculate the coefficients for the BDF2 time iteration. * These are stored in the BDF_COEFFICIENTS variable of the ProcessInfo container. */ void SetTimeCoefficients() { KRATOS_TRY; auto &r_process_info = (BaseType::GetModelPart()).GetProcessInfo(); if (mTimeOrder == 2) { //calculate the BDF coefficients double Dt = r_process_info[DELTA_TIME]; double OldDt = r_process_info.GetPreviousTimeStepInfo(1)[DELTA_TIME]; double Rho = OldDt / Dt; double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho); Vector& BDFcoeffs = r_process_info[BDF_COEFFICIENTS]; BDFcoeffs.resize(3, false); BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant) BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant) BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant) } else if (mTimeOrder == 1) { double Dt = r_process_info[DELTA_TIME]; double TimeCoeff = 1.0 / Dt; Vector& BDFcoeffs = r_process_info[BDF_COEFFICIENTS]; BDFcoeffs.resize(2, false); BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt) BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt) } KRATOS_CATCH(""); } virtual std::tuple<bool,double> SolveStep() { ModelPart& rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); // 1. Fractional step momentum iteration rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1); bool Converged = false; for(unsigned int it = 0; it < mMaxVelocityIter; ++it) { KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 1) << "Momentum iteration " << it << std::endl; // build momentum system and solve for fractional step velocity increment rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1); double NormDv = mpMomentumStrategy->Solve(); // // Compute projections (for stabilization) // rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4); // this->ComputeSplitOssProjections(rModelPart); // // Additional steps // Moved to end of step // for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin(); // iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps) // (*iExtraSteps)->Execute(); // Check convergence Converged = this->CheckFractionalStepConvergence(NormDv); if (Converged) { KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "Fractional velocity converged in " << it + 1 << " iterations." << std::endl; break; } } KRATOS_INFO_IF("FractionalStepStrategy", !Converged && BaseType::GetEchoLevel() > 0) << "Fractional velocity iterations did not converge." 
<< std::endl; // Compute projections (for stabilization) rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4); this->ComputeSplitOssProjections(rModelPart); // 2. Pressure solution (store pressure variation in PRESSURE_OLD_IT) rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,5); #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; const double old_press = it_node->FastGetSolutionStepValue(PRESSURE); it_node->FastGetSolutionStepValue(PRESSURE_OLD_IT) = -old_press; } KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "Calculating Pressure." << std::endl; double NormDp = mpPressureStrategy->Solve(); #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; it_node->FastGetSolutionStepValue(PRESSURE_OLD_IT) += it_node->FastGetSolutionStepValue(PRESSURE); } // 3. Compute end-of-step velocity KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "Updating Velocity." << std::endl; rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,6); this->CalculateEndOfStepVelocity(); /* mpPressureStrategy->Clear(); double NormDu = mpPressureStrategy->Solve(); mpPressureStrategy->Clear(); */ // Additional steps for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin(); iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps) (*iExtraSteps)->Execute(); // Set the output tuple as the fractional velocity convergence and pressure norm return std::make_tuple(Converged, NormDp); } bool CheckFractionalStepConvergence(const double NormDv) { ModelPart& rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); double NormV = 0.00; #pragma omp parallel for reduction(+:NormV) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < 3; ++d) { NormV += r_vel[d] * r_vel[d]; } } NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); const double zero_tol = 1.0e-12; const double Ratio = (NormV < zero_tol) ? NormDv : NormDv / NormV; KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "CONVERGENCE CHECK:" << std::endl; KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << std::scientific << std::setprecision(8) << "FRAC VEL.: ratio = " << Ratio <<"; exp.ratio = " << mVelocityTolerance << " abs = " << NormDv << std::endl; if (Ratio < mVelocityTolerance) return true; else return false; } bool CheckPressureConvergence(const double NormDp) { ModelPart& rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); double NormP = 0.00; #pragma omp parallel for reduction(+:NormP) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const double Pr = it_node->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } NormP = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); const double zero_tol = 1.0e-12; const double Ratio = (NormP < zero_tol) ? 
NormDp : NormDp / NormP; KRATOS_INFO_IF("FractionalStepStrategy", BaseType::GetEchoLevel() > 0) << "Pressure relative error: " << Ratio << std::endl; if (Ratio < mPressureTolerance) { return true; } else return false; } virtual void ComputeSplitOssProjections(ModelPart& rModelPart) { array_1d<double,3> Out = ZeroVector(3); const int n_nodes = rModelPart.NumberOfNodes(); const int n_elems = rModelPart.NumberOfElements(); #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; it_node->FastGetSolutionStepValue(CONV_PROJ) = CONV_PROJ.Zero(); it_node->FastGetSolutionStepValue(PRESS_PROJ) = PRESS_PROJ.Zero(); it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0; it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0; } #pragma omp parallel for for (int i_elem = 0; i_elem < n_elems; ++i_elem) { const auto it_elem = rModelPart.ElementsBegin() + i_elem; it_elem->Calculate(CONV_PROJ, Out, rModelPart.GetProcessInfo()); } rModelPart.GetCommunicator().AssembleCurrentData(CONV_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(PRESS_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); // If there are periodic conditions, add contributions from both sides to the periodic nodes this->PeriodicConditionProjectionCorrection(rModelPart); #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_AREA); it_node->FastGetSolutionStepValue(CONV_PROJ) /= NodalArea; it_node->FastGetSolutionStepValue(PRESS_PROJ) /= NodalArea; it_node->FastGetSolutionStepValue(DIVPROJ) /= NodalArea; } } virtual void CalculateEndOfStepVelocity() { ModelPart& rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); const int n_elems = rModelPart.NumberOfElements(); array_1d<double,3> Out = ZeroVector(3); VariableUtils().SetHistoricalVariableToZero(FRACT_VEL, rModelPart.Nodes()); #pragma omp parallel for for (int i_elem = 0; i_elem < n_elems; ++i_elem) { const auto it_elem = rModelPart.ElementsBegin() + i_elem; it_elem->Calculate(VELOCITY, Out, rModelPart.GetProcessInfo()); } rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL); this->PeriodicConditionVelocityCorrection(rModelPart); // Force the end of step velocity to verify slip conditions in the model if (mUseSlipConditions) this->EnforceSlipCondition(SLIP); if (mDomainSize > 2) { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_AREA); if ( ! it_node->IsFixed(VELOCITY_X) ) it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea; if ( ! it_node->IsFixed(VELOCITY_Y) ) it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; if ( ! it_node->IsFixed(VELOCITY_Z) ) it_node->FastGetSolutionStepValue(VELOCITY_Z) += it_node->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalArea; } } else { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_AREA); if ( ! it_node->IsFixed(VELOCITY_X) ) it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea; if ( ! 
it_node->IsFixed(VELOCITY_Y) ) it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; } } } /** * @brief Substract wall-normal component of velocity update to ensure that the final velocity satisfies slip conditions. * @param rSlipWallFlag If Node.Is(rSlipWallFlag) == true, the node is in the wall. */ void EnforceSlipCondition(const Kratos::Flags& rSlipWallFlag) { ModelPart& rModelPart = BaseType::GetModelPart(); const int num_nodes_in_model_part = rModelPart.NumberOfNodes(); #pragma omp parallel for for (int i = 0; i < num_nodes_in_model_part; i++) { ModelPart::NodeIterator itNode = rModelPart.NodesBegin() + i; const Node<3>& r_const_node = *itNode; if ( r_const_node.Is(rSlipWallFlag) ) { const array_1d<double,3>& rNormal = itNode->FastGetSolutionStepValue(NORMAL); array_1d<double,3>& rDeltaVelocity = itNode->FastGetSolutionStepValue(FRACT_VEL); double Proj = rNormal[0] * rDeltaVelocity[0]; double Norm = rNormal[0] * rNormal[0]; for (unsigned int d = 1; d < mDomainSize; ++d) { Proj += rNormal[d] * rDeltaVelocity[d]; Norm += rNormal[d] * rNormal[d]; } Proj /= Norm; rDeltaVelocity -= Proj * rNormal; } } } /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n * 2- The non-historical containers are added across processes, transmiting the right value from the condition owner to all partitions.\n * 3- The value on all periodic nodes is replaced by the one received in step 2. */ void PeriodicConditionProjectionCorrection(ModelPart& rModelPart) { Communicator& r_comm = rModelPart.GetCommunicator(); if (mrPeriodicIdVar.Key() != Kratos::Variable<int>::StaticObject().Key()) { int GlobalNodesNum = r_comm.LocalMesh().Nodes().size(); GlobalNodesNum = r_comm.GetDataCommunicator().SumAll(GlobalNodesNum); for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ ) { ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry(); if (rGeom.PointsNumber() == 2) { Node<3>& rNode0 = rGeom[0]; int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar); Node<3>& rNode1 = rGeom[1]; int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar); // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition) if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) ) { double NodalArea = rNode0.FastGetSolutionStepValue(NODAL_AREA) + rNode1.FastGetSolutionStepValue(NODAL_AREA); array_1d<double,3> ConvProj = rNode0.FastGetSolutionStepValue(CONV_PROJ) + rNode1.FastGetSolutionStepValue(CONV_PROJ); array_1d<double,3> PressProj = rNode0.FastGetSolutionStepValue(PRESS_PROJ) + rNode1.FastGetSolutionStepValue(PRESS_PROJ); double DivProj = rNode0.FastGetSolutionStepValue(DIVPROJ) + rNode1.FastGetSolutionStepValue(DIVPROJ); rNode0.GetValue(NODAL_AREA) = NodalArea; rNode0.GetValue(CONV_PROJ) = ConvProj; rNode0.GetValue(PRESS_PROJ) = PressProj; rNode0.GetValue(DIVPROJ) = DivProj; rNode1.GetValue(NODAL_AREA) = NodalArea; rNode1.GetValue(CONV_PROJ) = ConvProj; rNode1.GetValue(PRESS_PROJ) = PressProj; rNode1.GetValue(DIVPROJ) = DivProj; } } else if (rGeom.PointsNumber() == 4 && 
rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum) { double NodalArea = rGeom[0].FastGetSolutionStepValue(NODAL_AREA); array_1d<double,3> ConvProj = rGeom[0].FastGetSolutionStepValue(CONV_PROJ); array_1d<double,3> PressProj = rGeom[0].FastGetSolutionStepValue(PRESS_PROJ); double DivProj = rGeom[0].FastGetSolutionStepValue(DIVPROJ); for (unsigned int i = 1; i < 4; i++) { NodalArea += rGeom[i].FastGetSolutionStepValue(NODAL_AREA); ConvProj += rGeom[i].FastGetSolutionStepValue(CONV_PROJ); PressProj += rGeom[i].FastGetSolutionStepValue(PRESS_PROJ); DivProj += rGeom[i].FastGetSolutionStepValue(DIVPROJ); } for (unsigned int i = 0; i < 4; i++) { rGeom[i].GetValue(NODAL_AREA) = NodalArea; rGeom[i].GetValue(CONV_PROJ) = ConvProj; rGeom[i].GetValue(PRESS_PROJ) = PressProj; rGeom[i].GetValue(DIVPROJ) = DivProj; } } } rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA); rModelPart.GetCommunicator().AssembleNonHistoricalData(CONV_PROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(PRESS_PROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { if (itNode->GetValue(NODAL_AREA) != 0.0) { itNode->FastGetSolutionStepValue(NODAL_AREA) = itNode->GetValue(NODAL_AREA); itNode->FastGetSolutionStepValue(CONV_PROJ) = itNode->GetValue(CONV_PROJ); itNode->FastGetSolutionStepValue(PRESS_PROJ) = itNode->GetValue(PRESS_PROJ); itNode->FastGetSolutionStepValue(DIVPROJ) = itNode->GetValue(DIVPROJ); // reset for next iteration itNode->GetValue(NODAL_AREA) = 0.0; itNode->GetValue(CONV_PROJ) = CONV_PROJ.Zero(); itNode->GetValue(PRESS_PROJ) = PRESS_PROJ.Zero(); itNode->GetValue(DIVPROJ) = 0.0; } } } } void PeriodicConditionVelocityCorrection(ModelPart& rModelPart) { Communicator& r_comm = rModelPart.GetCommunicator(); if (mrPeriodicIdVar.Key() != Kratos::Variable<int>::StaticObject().Key()) { int GlobalNodesNum = r_comm.LocalMesh().Nodes().size(); GlobalNodesNum = r_comm.GetDataCommunicator().SumAll(GlobalNodesNum); for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ ) { ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry(); if (rGeom.PointsNumber() == 2) { Node<3>& rNode0 = rGeom[0]; int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar); Node<3>& rNode1 = rGeom[1]; int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar); // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition) if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) ) { array_1d<double,3> DeltaVel = rNode0.FastGetSolutionStepValue(FRACT_VEL) + rNode1.FastGetSolutionStepValue(FRACT_VEL); rNode0.GetValue(FRACT_VEL) = DeltaVel; rNode1.GetValue(FRACT_VEL) = DeltaVel; } } else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum) { array_1d<double,3> DeltaVel = rGeom[0].FastGetSolutionStepValue(FRACT_VEL); for (unsigned int i = 1; i < 4; i++) { DeltaVel += rGeom[i].FastGetSolutionStepValue(FRACT_VEL); } for (unsigned int i = 0; i < 4; i++) { rGeom[i].GetValue(FRACT_VEL) = DeltaVel; } } } rModelPart.GetCommunicator().AssembleNonHistoricalData(FRACT_VEL); for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { array_1d<double,3>& rDeltaVel = 
itNode->GetValue(FRACT_VEL); if ( rDeltaVel[0]*rDeltaVel[0] + rDeltaVel[1]*rDeltaVel[1] + rDeltaVel[2]*rDeltaVel[2] != 0.0) { itNode->FastGetSolutionStepValue(FRACT_VEL) = itNode->GetValue(FRACT_VEL); rDeltaVel = ZeroVector(3); } } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ void InitializeStrategy( SolverSettingsType& rSolverConfig, bool PredictorCorrector) { KRATOS_TRY; mTimeOrder = rSolverConfig.GetTimeOrder(); // Check that input parameters are reasonable and sufficient. this->Check(); mDomainSize = rSolverConfig.GetDomainSize(); mPredictorCorrector = PredictorCorrector; mUseSlipConditions = rSolverConfig.UseSlipConditions(); mReformDofSet = rSolverConfig.GetReformDofSet(); BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel()); // Initialize strategies for each step bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity,mpMomentumStrategy); if (HaveVelStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Velocity,mVelocityTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); } else { KRATOS_ERROR << "FractionalStepStrategy error: No Velocity strategy defined in FractionalStepSettings" << std::endl; } bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure,mpPressureStrategy); if (HavePressStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Pressure,mPressureTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Pressure,mMaxPressureIter); } else { KRATOS_ERROR << "FractionalStepStrategy error: No Pressure strategy defined in FractionalStepSettings" << std::endl; } Process::Pointer pTurbulenceProcess; bool HaveTurbulence = rSolverConfig.GetTurbulenceModel(pTurbulenceProcess); if (HaveTurbulence) mExtraIterationSteps.push_back(pTurbulenceProcess); // Check input parameters this->Check(); KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. FractionalStepStrategy& operator=(FractionalStepStrategy const& rOther){} /// Copy constructor. FractionalStepStrategy(FractionalStepStrategy const& rOther){} ///@} }; /// Class FStepStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_FRACTIONAL_STEP_STRATEGY
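The variable-step BDF2 weights assembled in SetTimeCoefficients() above reduce to the familiar (3, -4, 1)/(2*dt) stencil when the time step is constant. The following standalone C sketch simply re-evaluates those formulas outside of Kratos so the constant-step sanity check can be run in isolation; the function name bdf2_coefficients and the test values are illustrative choices, not part of the strategy's API.

#include <stdio.h>

/* Variable-step BDF2 weights, mirroring SetTimeCoefficients():
 * rho = dt_old / dt, c = 1 / (dt*rho*rho + dt*rho). */
static void bdf2_coefficients(double dt, double dt_old, double coeffs[3])
{
    const double rho = dt_old / dt;
    const double c   = 1.0 / (dt * rho * rho + dt * rho);
    coeffs[0] =  c * (rho * rho + 2.0 * rho);        /* step n+1:  3/(2*dt) for constant dt */
    coeffs[1] = -c * (rho * rho + 2.0 * rho + 1.0);  /* step n  : -4/(2*dt) for constant dt */
    coeffs[2] =  c;                                  /* step n-1:  1/(2*dt) for constant dt */
}

int main(void)
{
    double coeffs[3];
    bdf2_coefficients(0.1, 0.1, coeffs);   /* constant step of 0.1: expect 15, -20, 5 */
    printf("BDF2 coefficients: %g %g %g\n", coeffs[0], coeffs[1], coeffs[2]);
    return 0;
}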
hdp_math_utils.c
#include <math.h> #include <tgmath.h> #include <stdlib.h> #include <stdio.h> #include <float.h> #include <stdbool.h> #include <inttypes.h> #include "hdp_math_utils.h" #include "sonLib.h" #define LOG_ROOT_PI 0.572364942924700087071713 #define LOG_4 1.386294361119890618834464 #ifndef M_PI #define M_PI 3.14159265358979323846264338 #endif #ifndef EULER_MASCHERONI #define EULER_MASCHERONI 0.57721566490153286060651209008240243 #endif #ifndef MACHEP #define MACHEP 1.11022302462515654042E-16 #endif #ifndef MINUS_INF #define MINUS_INF -0.5 * DBL_MAX #endif void parallel_cdf(double* cdf, double* probs, int64_t length, int64_t chunk_size) { if (2 * chunk_size >= length) { double cumul = 0.0; for (int64_t i = 0; i < length; i++) { cumul += probs[i]; cdf[i] = cumul; } return; } int64_t num_chunks = (length - 1) / chunk_size + 1; #pragma omp parallel for shared(cdf,probs) for (int64_t i = 0; i < num_chunks; i++) { int64_t start = i * chunk_size; int64_t stop = start + chunk_size; if (stop > length) { stop = length; } double partial_cumul = 0.0; for (int64_t j = start; j < stop; j++) { partial_cumul += probs[j]; cdf[j] = partial_cumul; } } double* partial_sums = (double*) malloc(sizeof(double) * num_chunks); double partial_sums_cumul = 0.0; for (int64_t i = chunk_size - 1; i < length; i += chunk_size) { partial_sums_cumul += cdf[i]; partial_sums[i / chunk_size] = partial_sums_cumul; } #pragma omp parallel for shared(cdf,partial_sums) for (int64_t i = chunk_size; i < length; i++) { cdf[i] += partial_sums[i / chunk_size - 1]; } free(partial_sums); } double parallel_max(double* x, int64_t length) { double max_val = MINUS_INF; #pragma omp parallel shared(max_val) { double local_max = MINUS_INF; #pragma omp for nowait for (int64_t i = 0; i < length; i++) { if (x[i] > local_max) { local_max = x[i]; } } #pragma omp critical { if (local_max > max_val) { max_val = local_max; } } } return max_val; } void parallel_add(double add_val, double* x, int64_t length) { #pragma omp parallel for for (int64_t i = 0; i < length; i++) { x[i] += add_val; } } void parallel_exp(double* x, int64_t length) { #pragma omp parallel for for (int64_t i = 0; i < length; i++) { x[i] = exp(x[i]); } } typedef struct LogGammaHalfMemo LogGammaHalfMemo; struct LogGammaHalfMemo { double alpha; double* zero_offset_memo; int64_t zero_offset_final_entry; int64_t zero_offset_length; double* half_offset_memo; int64_t half_offset_final_entry; int64_t half_offset_length; }; LogGammaHalfMemo* new_log_gamma_memo(double alpha) { LogGammaHalfMemo* memo = (LogGammaHalfMemo*) malloc(sizeof(LogGammaHalfMemo)); memo->alpha = alpha; double* zero_base_case = (double*) malloc(sizeof(double)); zero_base_case[0] = lgamma(alpha); memo->zero_offset_final_entry = 0; memo->zero_offset_memo = zero_base_case; memo->zero_offset_length = 1; double* half_base_case = (double*) malloc(sizeof(double)); half_base_case[0] = lgamma(alpha + .5); memo->half_offset_final_entry = 0; memo->half_offset_memo = half_base_case; memo->half_offset_length = 1; return memo; } void destroy_log_gamma_memo(LogGammaHalfMemo* memo) { free(memo->half_offset_memo); free(memo->zero_offset_memo); free(memo); } void extend_gamma_zero_offset_memo(LogGammaHalfMemo* memo) { int64_t final_entry = memo->half_offset_final_entry + 1; memo->zero_offset_final_entry = final_entry; double* current_array = memo->zero_offset_memo; int64_t current_length = memo->zero_offset_length; if (current_length == final_entry) { int64_t new_array_length = current_length * 2; double* new_array = (double*) malloc(sizeof(double) 
* new_array_length); for (int64_t i = 0; i < current_length; i++) { new_array[i] = current_array[i]; } memo->zero_offset_length = new_array_length; memo->zero_offset_memo = new_array; free(current_array); current_array = new_array; } double log_term = log(memo->alpha - 1.0 + (double) final_entry); current_array[final_entry] = current_array[final_entry - 1] + log_term; } void extend_gamma_half_offset_memo(LogGammaHalfMemo* memo) { int64_t final_entry = memo->half_offset_final_entry + 1; memo->half_offset_final_entry = final_entry; double* current_array = memo->half_offset_memo; int64_t current_length = memo->half_offset_length; if (current_length == final_entry) { int64_t new_array_length = current_length * 2; double* new_array = (double*) malloc(sizeof(double) * new_array_length); for (int64_t i = 0; i < current_length; i++) { new_array[i] = current_array[i]; } memo->half_offset_length = new_array_length; memo->half_offset_memo = new_array; free(current_array); current_array = new_array; } double log_term = log(memo->alpha -.5 + (double) final_entry); current_array[final_entry] = current_array[final_entry - 1] + log_term; } // returns log(Gamma(memo->alpha + n / 2)) double offset_log_gamma_half(int64_t n, LogGammaHalfMemo* memo) { int64_t idx = n / 2; if (n % 2 == 0) { while (memo->zero_offset_final_entry < idx) { extend_gamma_zero_offset_memo(memo); } return memo->zero_offset_memo[idx]; } else { while (memo->half_offset_final_entry < idx) { extend_gamma_half_offset_memo(memo); } return memo->half_offset_memo[idx]; } } struct SumOfLogsMemo { double* memo_array; int64_t final_entry; int64_t array_length; }; SumOfLogsMemo* new_log_sum_memo() { SumOfLogsMemo* memo = (SumOfLogsMemo*) malloc(sizeof(SumOfLogsMemo)); double* base_case = (double*) malloc(sizeof(double)); base_case[0] = 0.0; memo->memo_array = base_case; memo->final_entry = 1; memo->array_length = 1; return memo; } void destroy_log_sum_memo(SumOfLogsMemo* memo) { free(memo->memo_array); free(memo); } void extend_log_sum_memo(SumOfLogsMemo* memo) { int64_t final_entry = memo->final_entry; int64_t current_length = memo->array_length; if (current_length == final_entry) { double* current_array = memo->memo_array; int64_t new_array_length = current_length * 2; double* new_array = (double*) malloc(sizeof(double) * new_array_length); for (int64_t i = 0; i < current_length; i++) { new_array[i] = current_array[i]; } memo->array_length = new_array_length; memo->memo_array = new_array; free(current_array); } double log_term = log((double) final_entry + 1); memo->memo_array[final_entry] = memo->memo_array[final_entry - 1] + log_term; (memo->final_entry)++; } double sum_of_logs(SumOfLogsMemo* memo, int64_t n) { while (n > memo->final_entry) { extend_log_sum_memo(memo); } return memo->memo_array[n - 1]; } // returns log(Gamma(n / 2)) in amortized constant time with low risk of overflow double log_gamma_half(int64_t n, SumOfLogsMemo* sum_of_logs_memo) { if (n <= 2) { fprintf(stderr, "log_gamma_half only supports n > 2\n"); exit(EXIT_FAILURE); } if (n % 2 == 0) { return sum_of_logs(sum_of_logs_memo, n / 2 - 1); } else { return LOG_ROOT_PI - (n / 2) * LOG_4 + sum_of_logs(sum_of_logs_memo, n - 1) - sum_of_logs(sum_of_logs_memo, n / 2); } } // returns log(x + y) without leaving log transformed space double add_logs(double log_x, double log_y) { if (log_x > log_y) { return log_x + log(1.0 + exp(log_y - log_x)); } else { return log_y + log(1.0 + exp(log_x - log_y)); } } // quick-select algorithm on array copy (does not alter original array) double 
quickselect(double* arr, int64_t length, int64_t target_idx) { if (target_idx < 0 || target_idx >= length) { fprintf(stderr, "Order statistic outside of array bounds\n"); exit(EXIT_FAILURE); } double* arr_copy = (double*) malloc(sizeof(double) * length); for (int64_t i = 0; i < length; i++ ) { arr_copy[i] = arr[i]; } int64_t low = 0; int64_t hi = length - 1; int64_t mid; int64_t median; double temp; while (true) { // median of three technique mid = (hi + low) / 2; if (arr_copy[hi] > arr_copy[mid]) { if (arr_copy[hi] > arr_copy[low]) { if (arr_copy[mid] > arr_copy[low]) { median = mid; } else { median = low; } } else { median = hi; } } else { if (arr_copy[hi] > arr_copy[low]) { median = hi; } else { if (arr_copy[mid] > arr_copy[low]) { median = low; } else { median = mid; } } } // remove pivot temp = arr_copy[median]; arr_copy[median] = arr_copy[hi]; arr_copy[hi] = temp; // partition array int64_t pivot = low; for (int64_t i = low; i < hi; i++) { if (arr_copy[i] < arr_copy[hi]) { temp = arr_copy[i]; arr_copy[i] = arr_copy[pivot]; arr_copy[pivot] = temp; pivot++; } } temp = arr_copy[pivot]; arr_copy[pivot] = arr_copy[hi]; arr_copy[hi] = temp; if (pivot == target_idx) { return arr_copy[pivot]; } else if (pivot < target_idx) { low = pivot + 1; } else { hi = pivot - 1; } } } double median(double* arr, int64_t length) { return quickselect(arr, length, length / 2); } double max(double* arr, int64_t length) { double curr_max = arr[0]; for (int64_t i = 1; i < length; i++) { if (arr[i] > curr_max) { curr_max = arr[i]; } } return curr_max; } // returns the index of the first element of arr greater or equal to x, assuming arr is sorted // returns final index if x is greater than all elements of arr int64_t bisect_left(double x, double* arr, int64_t length) { if (x <= arr[0]) { return 0; } int64_t low = 0; int64_t hi = length - 1; int64_t mid; double arr_mid; while (hi > low + 1) { mid = (hi + low) / 2; arr_mid = arr[mid]; if (x <= arr_mid) { hi = mid; } else { low = mid; } } return hi; } void spline_knot_slopes_internal(double* x, double* y, double* k, int64_t idx, double center_coef_prev, double right_coef_prev, double rhs_prev, int64_t final_idx) { if (idx == final_idx) { double left_coef = 1.0 / (x[idx] - x[idx - 1]); double center_coef = 2.0 * left_coef; double rhs = 3.0 * (y[idx] - y[idx - 1]) * left_coef * left_coef; // Cramer's rule k[idx] = (rhs * center_coef_prev - rhs_prev * left_coef) / (center_coef * center_coef_prev - right_coef_prev * left_coef); return; } double left_coef = 1.0 / (x[idx] - x[idx - 1]); double right_coef = 1.0 / (x[idx + 1] - x[idx]); double center_coef = 2.0 * (left_coef + right_coef); double rhs = 3.0 * ((y[idx] - y[idx - 1]) * left_coef * left_coef + (y[idx + 1] - y[idx]) * right_coef * right_coef); center_coef -= left_coef * right_coef_prev / center_coef_prev; rhs -= left_coef * rhs_prev / center_coef_prev; spline_knot_slopes_internal(x, y, k, idx + 1, center_coef, right_coef, rhs, final_idx); k[idx] = (rhs - right_coef * k[idx + 1]) / center_coef; } double* spline_knot_slopes(double* x, double* y, int64_t length) { double* k = (double*) malloc(sizeof(double) * length); double right_coef = 1.0 / (x[1] - x[0]); double center_coef = 2.0 * right_coef; double rhs = 3.0 * (y[1] - y[0]) * right_coef * right_coef; spline_knot_slopes_internal(x, y, k, 1, center_coef, right_coef, rhs, length - 1); k[0] = (rhs - right_coef * k[1]) / center_coef; return k; } double spline_interp(double query_x, double* x, double* y, double* slope, int64_t length) { if (query_x <= x[0]) { return 
y[0] - slope[0] * (x[0] - query_x); } else if (query_x >= x[length - 1]) { int64_t n = length - 1; return y[n] + slope[n] * (query_x - x[n]); } else { int64_t idx_right = bisect_left(query_x, x, length); int64_t idx_left = idx_right - 1; double dx = x[idx_right] - x[idx_left]; double dy = y[idx_right] - y[idx_left]; double a = slope[idx_left] * dx - dy; double b = dy - slope[idx_right] * dx; double t_left = (query_x - x[idx_left]) / dx; double t_right = 1.0 - t_left; return t_right * y[idx_left] + t_left * y[idx_right] + t_left * t_right * (a * t_right + b * t_left); } } // assumes even spacing of x points double grid_spline_interp(double query_x, double* x, double* y, double* slope, int64_t length) { if (query_x <= x[0]) { return y[0] - slope[0] * (x[0] - query_x); } else if (query_x >= x[length - 1]) { int64_t n = length - 1; return y[n] + slope[n] * (query_x - x[n]); } else { double dx = x[1] - x[0]; int64_t idx_left = (int64_t) ((query_x - x[0]) / dx); int64_t idx_right = idx_left + 1; double dy = y[idx_right] - y[idx_left]; double a = slope[idx_left] * dx - dy; double b = dy - slope[idx_right] * dx; double t_left = (query_x - x[idx_left]) / dx; double t_right = 1.0 - t_left; return t_right * y[idx_left] + t_left * y[idx_right] + t_left * t_right * (a * t_right + b * t_left); } } double* linspace(double start, double stop, int64_t length) { if (start >= stop) { fprintf(stderr, "linspace requires stop > start\n"); exit(EXIT_FAILURE); } double* lin = (double*) malloc(sizeof(double) * length); int64_t n = length - 1; double dx = (stop - start) / ((double) n); for (int64_t i = 0; i < n; i++) { lin[i] = start + i * dx; } lin[n] = stop; return lin; } double rand_standard_uniform() { return ((double) rand()) / ((double) RAND_MAX); } double rand_uniform(double a) { return ((double) rand()) / ((double) RAND_MAX / a); } bool rand_bernoulli(double p) { return (rand_standard_uniform() < p); } double rand_exponential(double lambda) { double draw; do { draw = rand_standard_uniform(); } while (draw == 1.0); return -log(1.0 - draw) / lambda; } double log_posterior_conditional_term(double nu_post, double two_alpha_post, double beta_post) {//, SumOfLogsMemo* memo) { // return log_gamma_half((int64_t) two_alpha_post, memo) // - .5 * (log(nu_post) + two_alpha_post * log(beta_post)); return lgamma( 0.5 * two_alpha_post) - .5 * (log(nu_post) + two_alpha_post * log(beta_post)); } void normal_inverse_gamma_params(double* x, int64_t length, double* mu_out, double* nu_out, double* alpha_out, double* beta_out) { double mean = 0.0; for (int64_t i = 0; i < length; i++) { mean += x[i]; } mean /= (double) length; double dev; double sum_sq_devs = 0.0; for (int64_t i = 0; i < length; i++) { dev = x[i] - mean; sum_sq_devs += dev * dev; } *mu_out = mean; *nu_out = (double) length; *alpha_out = ((double) length - 1.0) / 2.0; *beta_out = .5 * sum_sq_devs; } static double A_digamma[] = { 8.33333333333333333333E-2, -2.10927960927960927961E-2, 7.57575757575757575758E-3, -4.16666666666666666667E-3, 3.96825396825396825397E-3, -8.33333333333333333333E-3, 8.33333333333333333333E-2 }; // modified from Scipy source: https://github.com/scipy/scipy/blob/master/scipy/special/cephes/psi.c static double polevl(double x, double coef[], int N) { double ans; int i; double *p; p = coef; ans = *p++; i = N; do ans = ans * x + *p++; while (--i); return (ans); } double digamma(double x) { double p, q, nz, s, w, y, z; int i, n, negative; negative = 0; nz = 0.0; if (x <= 0.0) { negative = 1; q = x; p = floor(q); if (p == q) { fprintf(stderr, 
"Digamma evaluated at singularity.\n"); exit(EXIT_FAILURE); } /* Remove the zeros of tan(NPY_PI x) * by subtracting the nearest integer from x */ nz = q - p; if (nz != 0.5) { if (nz > 0.5) { p += 1.0; nz = q - p; } nz = M_PI / tan(M_PI * nz); } else { nz = 0.0; } x = 1.0 - x; } /* check for positive integer up to 10 */ if ((x <= 10.0) && (x == floor(x))) { y = 0.0; n = x; for (i = 1; i < n; i++) { w = i; y += 1.0 / w; } y -= EULER_MASCHERONI; goto digamma_done; } s = x; w = 0.0; while (s < 10.0) { w += 1.0 / s; s += 1.0; } if (s < 1.0e17) { z = 1.0 / (s * s); y = z * polevl(z, A_digamma, 6); } else y = 0.0; y = log(s) - (0.5 / s) - y - w; digamma_done: if (negative) { y -= nz; } return y; } // modified from SciPy source: https://github.com/scipy/scipy/blob/master/scipy/special/cephes/zeta.c static double A_zeta[] = { 12.0, -720.0, 30240.0, -1209600.0, 47900160.0, -1.8924375803183791606e9, /*1.307674368e12/691 */ 7.47242496e10, -2.950130727918164224e12, /*1.067062284288e16/3617 */ 1.1646782814350067249e14, /*5.109094217170944e18/43867 */ -4.5979787224074726105e15, /*8.028576626982912e20/174611 */ 1.8152105401943546773e17, /*1.5511210043330985984e23/854513 */ -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091 */ }; double hurwitz_zeta(double x, double q) { int i; double a, b, k, s, t, w; if (x == 1.0) goto retinf; if (x < 1.0) { domerr: fprintf(stderr, "Domain error in zeta function.\n"); exit(EXIT_FAILURE); } if (q <= 0.0) { if (q == floor(q)) { retinf: fprintf(stderr, "Evaluted zeta function at singularity.\n"); exit(EXIT_FAILURE); } if (x != floor(x)) goto domerr; /* because q^-x not defined */ } /* Asymptotic expansion * http://dlmf.nist.gov/25.11#E43 */ if (q > 1e8) { return (1/(x - 1) + 1/(2*q)) * pow(q, 1 - x); } /* Euler-Maclaurin summation formula */ /* Permit negative q but continue sum until n+q > +9 . * This case should be handled by a reflection formula. * If q<0 and x is an integer, there is a relation to * the polyGamma function. 
*/ s = pow(q, -x); a = q; i = 0; b = 0.0; while ((i < 9) || (a <= 9.0)) { i += 1; a += 1.0; b = pow(a, -x); s += b; if (fabs(b / s) < MACHEP) goto zeta_done; } w = a; s += b * w / (x - 1.0); s -= 0.5 * b; a = 1.0; k = 0.0; for (i = 0; i < 12; i++) { a *= x + k; b /= w; t = a * b / A_zeta[i]; s = s + t; t = fabs(t / s); if (t < MACHEP) goto zeta_done; k += 1.0; a *= x + k; b /= w; k += 1.0; } zeta_done: return (s); } double trigamma(double x) { return hurwitz_zeta(2.0, x); } double newton_approx_alpha(int64_t length, double sum_log_tau, double sum_tau) { double constant = sum_log_tau / length - log( sum_tau / length); double alpha = (double) 1.0; double f_alpha; double df_alpha; double alpha_prime; while (true) { f_alpha = log(alpha) - digamma(alpha) + constant; df_alpha = 1.0 / alpha - trigamma(alpha); if (df_alpha == 0.0 || df_alpha != df_alpha) { fprintf(stderr, "MLE estimation of alpha numerically unstable at designated starting value.\n"); exit(EXIT_FAILURE); } alpha_prime = alpha - f_alpha / df_alpha; if (fabs(alpha - alpha_prime) < MACHEP) { return alpha_prime; } alpha = alpha_prime; } } void mle_normal_inverse_gamma_params(double* mus, double* taus, int64_t length, double* mu_0_out, double* nu_out, double* alpha_out, double* beta_out) { double sum_tau = 0.0; double sum_log_tau = 0.0; for (int64_t i = 0; i < length; i++) { sum_tau += taus[i]; sum_log_tau += log(taus[i]); } double mu_0 = 0.0; for (int64_t i = 0; i < length; i++) { mu_0 += mus[i] * taus[i]; } mu_0 /= sum_tau; double sum_weighted_sq_devs = 0.0; double dev; for (int64_t i = 0; i < length; i++) { dev = mus[i] - mu_0; sum_weighted_sq_devs += taus[i] * dev * dev; } double nu = ((double) length) / sum_weighted_sq_devs; double alpha = newton_approx_alpha(length, sum_log_tau, sum_tau); double beta = length * alpha / sum_tau; *mu_0_out = mu_0; *nu_out = nu; *alpha_out = alpha; *beta_out = beta; } int64_t* stList_toIntPtr(stList* list, int64_t* length_out) { int64_t length = (int64_t) stList_length(list); int64_t* int_arr = (int64_t*) malloc(sizeof(int64_t) * length); int64_t* entry; for (int64_t i = 0; i < length; i++) { entry = (int64_t*) stList_get(list, i); int_arr[i] = *entry; } *length_out = length; return int_arr; } double* stList_toDoublePtr(stList* list, int64_t* length_out) { int64_t length = stList_length(list); double* double_arr = (double*) malloc(sizeof(double) * length); double* entry; for (int64_t i = 0; i < length; i++) { entry = (double*) stList_get(list, i); double_arr[i] = *entry; } *length_out = length; return double_arr; }
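A minimal usage sketch for the cubic-spline helpers defined above (linspace, spline_knot_slopes, spline_interp). It assumes hdp_math_utils.h declares these functions (it is the header included by this file) and that the sketch is compiled and linked together with hdp_math_utils.c and its sonLib dependency; the sampled function and knot count are arbitrary illustrative choices.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "hdp_math_utils.h"   /* assumed to declare linspace, spline_knot_slopes, spline_interp */

int main(void)
{
    const int64_t n = 21;
    double* x = linspace(0.0, 1.0, n);               /* evenly spaced knots on [0, 1] */
    double* y = (double*) malloc(sizeof(double) * n);
    for (int64_t i = 0; i < n; i++) {
        y[i] = x[i] * x[i] * x[i] - x[i];            /* sample f(x) = x^3 - x at the knots */
    }

    double* k = spline_knot_slopes(x, y, n);         /* solve the tridiagonal system for the knot slopes */

    /* Evaluate the interpolant between knots; grid_spline_interp would also work
     * here because the knots are evenly spaced. */
    double q = 0.137;
    printf("spline(%g) = %g, f(%g) = %g\n",
           q, spline_interp(q, x, y, k, n), q, q * q * q - q);

    free(k);
    free(y);
    free(x);
    return 0;
}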
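add_logs() above is the usual log-sum-exp device for accumulating probabilities whose linear-space values would underflow a double. A small check of that behaviour, again assuming the declaration is available from hdp_math_utils.h:

#include <math.h>
#include <stdio.h>
#include "hdp_math_utils.h"   /* assumed to declare add_logs */

int main(void)
{
    /* Two probabilities far too small to represent directly: exp(-1200) underflows to 0. */
    double log_x = -1200.0;
    double log_y = -1205.0;

    /* log(x + y) computed entirely in log space. */
    double log_sum = add_logs(log_x, log_y);

    /* Same quantity via log1p for comparison: log_x + log1p(exp(log_y - log_x)). */
    printf("add_logs = %.10f, reference = %.10f\n",
           log_sum, log_x + log1p(exp(log_y - log_x)));
    return 0;
}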
thread_omp.c
/* ** ** Author: Jim Rosinski ** ** Utility functions for OpenMP-based threading */ #include "config.h" // Must be first include #include "thread.h" #include "private.h" #include "gptl_papi.h" #include <omp.h> #include <stdio.h> #include <stdlib.h> // free volatile int GPTLnthreads = -1; // num threads: init to bad value volatile int GPTLmax_threads = -1; // max num threads // make threadid non-static due to this file possibly being inlined volatile int *GPTLthreadid = NULL; // array of thread ids /* ** GPTLthreadinit: Allocate and initialize GPTLthreadid; set max number of threads ** ** Output results: ** GPTLmax_threads: max number of threads ** ** GPTLthreadid[] is allocated and initialized to -1 ** ** ** Return value: 0 (success) or GPTLerror (failure) */ int GPTLthreadinit (void) { int t; static const char *thisfunc = "GPTLthreadinit"; if (omp_get_thread_num () != 0) return GPTLerror ("OMP %s: MUST only be called by the master thread\n", thisfunc); // Allocate the GPTLthreadid array which maps physical thread IDs to logical IDs // For OpenMP this will be just GPTLthreadid[iam] = iam; if (GPTLthreadid) return GPTLerror ("OMP %s: has already been called.\n" "Maybe mistakenly called by multiple threads?\n", thisfunc); // GPTLmax_threads may have been set by the user, in which case use that. But if as // yet uninitialized, set to the current value of OMP_NUM_THREADS. if (GPTLmax_threads == -1) GPTLmax_threads = MAX ((1), (omp_get_max_threads ())); if ( ! (GPTLthreadid = (int *) GPTLallocate (GPTLmax_threads * sizeof (int), thisfunc))) return GPTLerror ("OMP %s: malloc failure for %d elements of GPTLthreadid\n", thisfunc, GPTLmax_threads); // Initialize GPTLthreadid array to flag values for use by GPTLget_thread_num(). // get_thread_num() will fill in the values on first use. for (t = 0; t < GPTLmax_threads; ++t) GPTLthreadid[t] = -1; #ifdef VERBOSE printf ("GPTL: OMP %s: Set GPTLmax_threads=%d\n", thisfunc, GPTLmax_threads); #endif return 0; } /* ** GPTLthreadfinalize: clean up ** ** Output results: ** GPTLthreadid array is freed and array pointer nullified */ void GPTLthreadfinalize () { free ((void *) GPTLthreadid); GPTLthreadid = NULL; } /* ** GPTLget_thread_num: Determine thread number of the calling thread ** Start PAPI counters if enabled and first call for this thread. 
** ** Output results: ** GPTLnthreads: Number of threads ** GPTLthreadid: Our thread id added to list on 1st call ** ** Return value: thread number (success) or GPTLerror (failure) ** 5/8/16: Modified to enable 2-level OMP nesting: Fold combination of current and parent ** thread info into a single index */ #ifdef INLINE_THREADING inline #endif int GPTLget_thread_num (void) { int t; static const char *thisfunc = "GPTLget_thread_num"; #ifdef ENABLE_NESTEDOMP int myid; // my thread id int lvl; // nest level: Currently only 2 nesting levels supported int parentid; // thread number of parent team int my_nthreads; // number of threads in the parent team myid = omp_get_thread_num (); if (omp_get_nested ()) { // nesting is "enabled", though not necessarily active lvl = omp_get_active_level (); // lvl=2 => inside 2 #pragma omp regions if (lvl < 2) { // 0 or 1-level deep: simply use thread id as index t = myid; } else if (lvl == 2) { // Create a unique id "t" for indexing into singly-dimensioned thread array parentid = omp_get_ancestor_thread_num (lvl-1); my_nthreads = omp_get_team_size (lvl); t = parentid*my_nthreads + myid; } else { return GPTLerror ("OMP %s: GPTL supports only 2 nested OMP levels got %d\n", thisfunc, lvl); } } else { // un-nested case: thread id is index t = myid; } #else t = omp_get_thread_num (); #endif if (t >= GPTLmax_threads) return GPTLerror ("OMP %s: returned id=%d exceeds GPTLmax_threads=%d\n", thisfunc, t, GPTLmax_threads); // If our thread number has already been set in the list, we are done if (t == GPTLthreadid[t]) return t; // Thread id not found. Modify GPTLthreadid with our ID, then start PAPI events if required. // Due to the setting of GPTLthreadid, everything below here will only execute once per thread. GPTLthreadid[t] = t; #ifdef VERBOSE printf ("GPTL: OMP %s: 1st call t=%d\n", thisfunc, t); #endif #ifdef HAVE_PAPI // When HAVE_PAPI is true, if 1 or more PAPI events are enabled, // create and start an event set for the new thread. if (GPTLget_npapievents () > 0) { #ifdef VERBOSE printf ("GPTL: OMP %s: Starting EventSet t=%d\n", thisfunc, t); #endif if (GPTLcreate_and_start_events (t) < 0) return GPTLerror ("GPTL: OMP %s: error from GPTLcreate_and_start_events for thread %d\n", thisfunc, t); } #endif // nthreads = GPTLmax_threads based on setting in GPTLthreadinit or user call to GPTLsetoption() GPTLnthreads = GPTLmax_threads; #ifdef VERBOSE printf ("GPTL: OMP %s: nthreads=%d\n", thisfunc, GPTLnthreads); #endif return t; } void GPTLprint_threadmapping (FILE *fp) { int t; fprintf (fp, "\n"); fprintf (fp, "Thread mapping:\n"); for (t = 0; t < GPTLnthreads; ++t) fprintf (fp, "GPTLthreadid[%d] = %d\n", t, GPTLthreadid[t]); }
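The 2-level nesting support in GPTLget_thread_num() folds the parent and child thread ids into a single index, t = parentid * team_size + myid. The sketch below reproduces only that index folding with stock OpenMP runtime calls; it is not GPTL code, and it assumes a runtime that actually activates the inner parallel region once omp_set_max_active_levels(2) is in effect.

#include <omp.h>
#include <stdio.h>

int main(void)
{
    omp_set_max_active_levels(2);      /* permit one level of nested parallelism */

    #pragma omp parallel num_threads(2)
    {
        #pragma omp parallel num_threads(2)
        {
            int t;
            int myid = omp_get_thread_num();
            int lvl  = omp_get_active_level();          /* 2 when the inner region is active */
            if (lvl < 2) {
                t = myid;                               /* un-nested (or serialized) case */
            } else {
                int parentid  = omp_get_ancestor_thread_num(lvl - 1);
                int team_size = omp_get_team_size(lvl);
                t = parentid * team_size + myid;        /* unique index into a flat thread array */
            }
            printf("outer=%d inner=%d -> t=%d\n",
                   omp_get_ancestor_thread_num(1), myid, t);
        }
    }
    return 0;
}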
residualbased_elimination_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER ) #define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER /* System includes */ #include <set> #include <unordered_set> /* External includes */ #ifdef KRATOS_SMP_OPENMP #include <omp.h> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "utilities/builtin_timer.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedEliminationBuilderAndSolver * @ingroup KratosCore * @brief Current class provides an implementation for standard elimination builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information. * Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedEliminationBuilderAndSolver : public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolver); /// Definition of the base class typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; /// The definition of the current class typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; /// Definition of the classes from the base class typedef typename BaseType::SizeType SizeType; typedef typename BaseType::IndexType IndexType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; /// Definition of the equation id vector typedef Element::EquationIdVectorType EquationIdVectorType; typedef Element::DofsVectorType DofsVectorType; /// Node definition typedef Node<3> NodeType; /// Containers definition typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; ///@} ///@name Life Cycle ///@{ /** * @brief Default 
constructor */ explicit ResidualBasedEliminationBuilderAndSolver() : BaseType() { } /** * @brief Default constructor. (with parameters) */ explicit ResidualBasedEliminationBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) : BaseType(pNewLinearSystemSolver) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * @brief Constructor. */ explicit ResidualBasedEliminationBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BaseType(pNewLinearSystemSolver) { } /** Destructor. */ ~ResidualBasedEliminationBuilderAndSolver() override { } /** * @brief Create method * @param pNewLinearSystemSolver The linear solver for the system of equations * @param ThisParameters The configuration parameters */ typename BaseType::Pointer Create( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters); } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Function to perform the build of the RHS. The vector could be sized as the total number * of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rb The RHS vector */ void Build( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rb ) override { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; // Getting the elements from the model ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditions_array = rModelPart.Conditions(); // Getting the elements from the model const int nelements = static_cast<int>(r_elements_array.size()); // Getting the array of the conditions const int nconditions = static_cast<int>(r_conditions_array.size()); const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); const auto it_elem_begin = r_elements_array.begin(); const auto it_cond_begin = r_conditions_array.begin(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms EquationIdVectorType equation_id; // Assemble all elements const auto timer = BuiltinTimer(); #pragma omp parallel firstprivate(LHS_Contribution, RHS_Contribution, equation_id ) { #pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; ++k) { auto it_elem = it_elem_begin + k; // Detect if the element is active or not. 
If the user did not make any choice the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental contribution pScheme->CalculateSystemContributions(*it_elem, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info); // Assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray); #else Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id); #endif } } #pragma omp for schedule(guided, 512) for (int k = 0; k < nconditions; ++k) { auto it_cond = it_cond_begin + k; // Detect if the element is active or not. If the user did not make any choice the element is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->CalculateSystemContributions(*it_cond, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info); #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray); #else Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id); #endif } } } KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() >=1) << "System build time: " << timer.ElapsedSeconds() << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished building" << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building of the LHS * @details Depending on the implementation choosen the size of the matrix could be equal to the total number of Dofs or to the number of unrestrained dofs * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix */ void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA ) override { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; // Getting the elements from the model ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditions_array = rModelPart.Conditions(); // Getting the elements from the model const int nelements = static_cast<int>(r_elements_array.size()); // Getting the array of the conditions const int nconditions = static_cast<int>(r_conditions_array.size()); const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); const auto it_elem_begin = r_elements_array.begin(); const auto it_cond_begin = r_conditions_array.begin(); // Resetting to zero the vector of reactions TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); // Contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); // Vector containing the localization in the system of the different terms EquationIdVectorType equation_id; #pragma omp parallel firstprivate(LHS_Contribution, equation_id ) { #pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; ++k) { auto it_elem = it_elem_begin + k; // Detect if the element is active or not. 
If the user did not make any choice the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental contribution pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleLHS(rA, LHS_Contribution, equation_id); } } #pragma omp for schedule(guided, 512) for (int k = 0; k < nconditions; ++k) { auto it_cond = it_cond_begin + k; // Detect if the element is active or not. If the user did not make any choice the element is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleLHS(rA, LHS_Contribution, equation_id); } } } KRATOS_CATCH("") } /** * @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom * and "N" is the total number of degrees of freedom involved. * @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed * degrees of freedom (but keeping the columns!!) * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix */ void BuildLHS_CompleteOnFreeRows( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA ) override { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; // Getting the elements from the model ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditions_array = rModelPart.Conditions(); // Getting the elements from the model const int nelements = static_cast<int>(r_elements_array.size()); // Getting the array of the conditions const int nconditions = static_cast<int>(r_conditions_array.size()); const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); const auto it_elem_begin = r_elements_array.begin(); const auto it_cond_begin = r_conditions_array.begin(); // Resetting to zero the vector of reactions TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); // Contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); // Vector containing the localization in the system of the different terms EquationIdVectorType equation_id; #pragma omp parallel firstprivate(LHS_Contribution, equation_id ) { #pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; ++k) { auto it_elem = it_elem_begin + k; // Detect if the element is active or not. If the user did not make any choice the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental contribution pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id); } } #pragma omp for schedule(guided, 512) for (int k = 0; k < nconditions; ++k) { auto it_cond = it_cond_begin + k; // Detect if the element is active or not. 
If the user did not make any choice the element is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id); } } } KRATOS_CATCH("") } /** * @brief This is a call to the linear system solver * @param rA The LHS matrix * @param rDx The Unknowns vector * @param rb The RHS vector */ void SystemSolve( TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY double norm_b; if (TSparseSpace::Size(rb) != 0) { norm_b = TSparseSpace::TwoNorm(rb); } else { norm_b = 0.0; } if (norm_b != 0.0) { // Do solve BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb); } else TSparseSpace::SetToZero(rDx); // Prints informations about the current time KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param rA The LHS matrix * @param rDx The Unknowns vector * @param rb The RHS vector * @param rModelPart The model part of the problem to solve */ void SystemSolveWithPhysics( TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb, ModelPart& rModelPart ) { KRATOS_TRY double norm_b; if (TSparseSpace::Size(rb) != 0) { norm_b = TSparseSpace::TwoNorm(rb); } else { norm_b = 0.0; } if (norm_b != 0.0) { // Provide physical data as needed if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() ) BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, BaseType::mDofSet, rModelPart); // Do solve BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl; } // Prints informations about the current time KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building and solving phase at the same time. 
* @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rDx The Unknowns vector * @param rb The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY Timer::Start("Build"); Build(pScheme, rModelPart, rA, rb); Timer::Stop("Build"); // Does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl; const auto timer = BuiltinTimer(); Timer::Start("Solve"); SystemSolveWithPhysics(rA, rDx, rb, rModelPart); Timer::Stop("Solve"); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl; KRATOS_CATCH("") } /** * @brief Corresponds to the previews, but the System's matrix is considered already built and only the RHS is built again * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rDx The Unknowns vector * @param rb The RHS vector */ void BuildRHSAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY BuildRHS(pScheme, rModelPart, rb); SystemSolve(rA, rDx, rb); KRATOS_CATCH("") } /** * @brief Function to perform the build of the RHS. * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) override { KRATOS_TRY // Resetting to zero the vector of reactions if(BaseType::mCalculateReactionsFlag) { TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); } // Getting the Elements ElementsArrayType& r_elements_array = rModelPart.Elements(); // Getting the array of the conditions ConditionsArrayType& r_conditions_array = rModelPart.Conditions(); const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Contributions to the system LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms EquationIdVectorType equation_id; // Assemble all elements #pragma omp parallel firstprivate( RHS_Contribution, equation_id) { const auto it_elem_begin = r_elements_array.begin(); const int nelements = static_cast<int>(r_elements_array.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < nelements; ++i) { auto it_elem = it_elem_begin + i; // Detect if the element is active or not. 
If the user did not make any choice the element is active by default bool element_is_active = true; if (it_elem->IsDefined(ACTIVE)) element_is_active = it_elem->Is(ACTIVE); if (element_is_active) { // Calculate elemental Right Hand Side Contribution pScheme->CalculateRHSContribution(*it_elem, RHS_Contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleRHS(rb, RHS_Contribution, equation_id); } } // Assemble all conditions const auto it_cond_begin = r_conditions_array.begin(); const int nconditions = static_cast<int>(r_conditions_array.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i < nconditions; ++i) { auto it_cond = it_cond_begin + i; // Detect if the element is active or not. If the user did not make any choice the element is active by default bool condition_is_active = true; if (it_cond->IsDefined(ACTIVE)) condition_is_active = it_cond->Is(ACTIVE); if (condition_is_active) { // Calculate elemental contribution pScheme->CalculateRHSContribution(*it_cond, RHS_Contribution, equation_id, r_current_process_info); // Assemble the elemental contribution AssembleRHS(rb, RHS_Contribution, equation_id); } } } KRATOS_CATCH("") } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. * @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) override { KRATOS_TRY; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl; // Gets the array of elements from the modeler ElementsArrayType& r_elements_array = rModelPart.Elements(); const int nelements = static_cast<int>(r_elements_array.size()); DofsVectorType elemental_dof_list; const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); SizeType nthreads = ParallelUtilities::GetNumThreads(); typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type; std::vector<set_type> dofs_aux_list(nthreads); for (int i = 0; i < static_cast<int>(nthreads); ++i) { dofs_aux_list[i].reserve(nelements); } #pragma omp parallel for firstprivate(nelements, elemental_dof_list) for (int i = 0; i < static_cast<int>(nelements); ++i) { auto it_elem = r_elements_array.begin() + i; const IndexType this_thread_id = OpenMPUtils::ThisThread(); // Gets list of Dof involved on every element pScheme->GetDofList(*it_elem, elemental_dof_list, r_current_process_info); dofs_aux_list[this_thread_id].insert(elemental_dof_list.begin(), elemental_dof_list.end()); } ConditionsArrayType& r_conditions_array = rModelPart.Conditions(); const int nconditions = static_cast<int>(r_conditions_array.size()); #pragma omp parallel for firstprivate(nconditions, elemental_dof_list) for (int i = 0; i < nconditions; ++i) { auto it_cond = r_conditions_array.begin() + i; const IndexType this_thread_id = OpenMPUtils::ThisThread(); // Gets list of Dof involved on every element pScheme->GetDofList(*it_cond, elemental_dof_list, r_current_process_info); dofs_aux_list[this_thread_id].insert(elemental_dof_list.begin(), elemental_dof_list.end()); } // Here we do a reduction in a tree so to have everything on thread 0 SizeType old_max = nthreads; SizeType new_max = 
ceil(0.5*static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max) {
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(new_max); ++i) {
        if (i + new_max < old_max) {
            dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
            dofs_aux_list[i + new_max].clear();
        }
    }

    old_max = new_max;
    new_max = ceil(0.5*static_cast<double>(old_max));
}

DofsArrayType dof_temp;
BaseType::mDofSet = DofsArrayType();

dof_temp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); ++it) {
    dof_temp.push_back(*it);
}
dof_temp.Sort();
BaseType::mDofSet = dof_temp;

// Throws an exception if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;

BaseType::mDofSetIsInitialized = true;

KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;

#ifdef USE_LOCKS_IN_ASSEMBLY
if (mLockArray.size() != 0) {
    for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
        omp_destroy_lock(&mLockArray[i]);
}
mLockArray.resize(BaseType::mDofSet.size());

for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
    omp_init_lock(&mLockArray[i]);
#endif

// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag()) {
    for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
        KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction())
            << "Reaction variable not set for the following : " << std::endl
            << "Node : " << dof_iterator->Id() << std::endl
            << "Dof : " << (*dof_iterator) << std::endl
            << "Not possible to calculate reactions." << std::endl;
    }
}
#endif

KRATOS_CATCH("");
}

/**
 * @brief Organises the dofset in order to speed up the building phase
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(ModelPart& rModelPart) override
{
    // Set equation id for degrees of freedom
    // the free degrees of freedom are positioned at the beginning of the system,
    // while the fixed ones are at the end (in opposite order).
    //
    // that means that if the EquationId is greater than "mEquationSystemSize"
    // the pointed degree of freedom is restrained
    //
    int free_id = 0;
    int fix_id = BaseType::mDofSet.size();

    for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
        if (dof_iterator->IsFixed())
            dof_iterator->SetEquationId(--fix_id);
        else
            dof_iterator->SetEquationId(free_id++);

    BaseType::mEquationSystemSize = fix_id;
}

/**
 * @brief This method resizes and initializes the system of equations
 * @param pScheme The integration scheme considered
 * @param pA The pointer to the LHS matrix
 * @param pDx The pointer to the vector of Unknowns
 * @param pb The pointer to the RHS vector
 * @param rModelPart The model part to be computed
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY

    if (pA == nullptr) { // If the pointer is not initialized, initialize it to an empty matrix
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == nullptr) { // If the pointer is not initialized, initialize it to an empty vector
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == nullptr) { // If the pointer is not initialized, initialize it to an empty vector
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }
    if (BaseType::mpReactionsVector == nullptr) { // If the pointer is not initialized, initialize it to an empty vector
        TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
        BaseType::mpReactionsVector.swap(pNewReactionsVector);
    }

    TSystemMatrixType& rA = *pA;
    TSystemVectorType& rDx = *pDx;
    TSystemVectorType& rb = *pb;

    // Resizing the system vectors and matrix
    if (rA.size1() == 0 || BaseType::GetReshapeMatrixFlag()) { // If the matrix is not initialized
        rA.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, rA, rModelPart);
    } else {
        if (rA.size1() != BaseType::mEquationSystemSize || rA.size2() != BaseType::mEquationSystemSize) {
            KRATOS_ERROR <<
                "The equation system size has changed during the simulation. This is not permitted." << std::endl;
            rA.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
            ConstructMatrixStructure(pScheme, rA, rModelPart);
        }
    }
    if (rDx.size() != BaseType::mEquationSystemSize) {
        rDx.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(rDx);
    if (rb.size() != BaseType::mEquationSystemSize) {
        rb.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(rb);

    //if needed resize the vector for the calculation of reactions
    if (BaseType::mCalculateReactionsFlag == true) {
        const std::size_t reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
        if (BaseType::mpReactionsVector->size() != reactions_vector_size)
            BaseType::mpReactionsVector->resize(reactions_vector_size, false);
    }

    KRATOS_CATCH("")
}

/**
 * @brief This method computes the reactions
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part considered
 * @param rA The LHS of the system
 * @param rDx The vector of Unknowns
 * @param rb The RHS vector
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    //refresh RHS to have the correct reactions
    BuildRHS(pScheme, rModelPart, rb);

    // Updating variables
    std::size_t i;
    TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
    for (auto it2 = BaseType::mDofSet.ptr_begin(); it2 != BaseType::mDofSet.ptr_end(); ++it2) {
        i = (*it2)->EquationId();
        if (i >= BaseType::mEquationSystemSize) {
            i -= BaseType::mEquationSystemSize;
            (*it2)->GetSolutionStepReactionValue() = -r_reactions_vector[i];
        }
    }
}

/**
 * @brief Applies the Dirichlet conditions. This operation may be very heavy or completely
 * inexpensive depending on the implementation chosen and on how the System Matrix is built.
 * @details For explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver chosen
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 */
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
}

/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 */
void Clear() override
{
    this->mDofSet = DofsArrayType();
    this->mpReactionsVector.reset();
    // this->mReactionsVector = TSystemVectorType();
    this->mpLinearSystemSolver->Clear();

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}

/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
* @param rModelPart The model part of the problem to solve * @return 0 all ok */ int Check(ModelPart& rModelPart) override { KRATOS_TRY return 0; KRATOS_CATCH(""); } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "elimination_builder_and_solver" })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "elimination_builder_and_solver"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedEliminationBuilderAndSolver"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ #ifdef USE_LOCKS_IN_ASSEMBLY std::vector<omp_lock_t> mLockArray; #endif ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assembles the system * @param rA The LHS of the system * @param rb The RHS of the system * @param rLHSContribution The LHS local contribution * @param rRHSContribution The RHS local contribution * @param rEquationId The equation id * @param rLockArray The lock of the dof * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling */ void Assemble( TSystemMatrixType& rA, TSystemVectorType& rb, const LocalSystemMatrixType& rLHSContribution, const LocalSystemVectorType& rRHSContribution, const Element::EquationIdVectorType& rEquationId #ifdef USE_LOCKS_IN_ASSEMBLY ,std::vector< omp_lock_t >& rLockArray #endif ) { const SizeType local_size = rLHSContribution.size1(); for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { #ifdef USE_LOCKS_IN_ASSEMBLY omp_set_lock(&rLockArray[i_global]); rb[i_global] += rRHSContribution(i_local); #else double& r_a = rb[i_global]; const double& v_a = rRHSContribution(i_local); #pragma omp atomic r_a += v_a; #endif AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId); #ifdef USE_LOCKS_IN_ASSEMBLY omp_unset_lock(&rLockArray[i_global]); #endif } //note that computation of reactions is not performed here! 
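                // (fixed-DoF rows are simply skipped here; the corresponding reaction values are
                // recovered later by CalculateReactions(), which rebuilds the RHS so that
                // AssembleRHS() accumulates the fixed-DoF contributions into mpReactionsVector)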
    }
}

/**
 * @brief This method constructs the relationship between the DoFs
 * @param pScheme The integration scheme
 * @param rA The LHS of the system
 * @param rModelPart The model part which defines the problem
 */
virtual void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& rA,
    ModelPart& rModelPart
    )
{
    // Filling the matrix with zeros (creating the structure)
    Timer::Start("MatrixStructure");

    const SizeType equation_size = BaseType::mEquationSystemSize;

    std::vector<std::unordered_set<IndexType> > indices(equation_size);

    #pragma omp parallel for firstprivate(equation_size)
    for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
        indices[iii].reserve(40);
    }

    Element::EquationIdVectorType ids(3, 0);

    #pragma omp parallel firstprivate(ids)
    {
        // The process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // We repeat the same declaration for each thread
        std::vector<std::unordered_set<IndexType> > temp_indexes(equation_size);

        #pragma omp for
        for (int index = 0; index < static_cast<int>(equation_size); ++index)
            temp_indexes[index].reserve(30);

        // Getting the size of the array of elements from the model
        const int number_of_elements = static_cast<int>(rModelPart.Elements().size());

        // Element initial iterator
        const auto it_elem_begin = rModelPart.ElementsBegin();

        // We iterate over the elements
        #pragma omp for schedule(guided, 512) nowait
        for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
            auto it_elem = it_elem_begin + i_elem;
            pScheme->EquationId( *it_elem, ids, r_current_process_info);
            for (auto& id_i : ids) {
                if (id_i < BaseType::mEquationSystemSize) {
                    auto& row_indices = temp_indexes[id_i];
                    for (auto& id_j : ids)
                        if (id_j < BaseType::mEquationSystemSize)
                            row_indices.insert(id_j);
                }
            }
        }

        // Getting the size of the array of the conditions
        const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());

        // Condition initial iterator
        const auto it_cond_begin = rModelPart.ConditionsBegin();

        // We iterate over the conditions
        #pragma omp for schedule(guided, 512) nowait
        for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
            auto it_cond = it_cond_begin + i_cond;
            pScheme->EquationId( *it_cond, ids, r_current_process_info);
            for (auto& id_i : ids) {
                if (id_i < BaseType::mEquationSystemSize) {
                    auto& row_indices = temp_indexes[id_i];
                    for (auto& id_j : ids)
                        if (id_j < BaseType::mEquationSystemSize)
                            row_indices.insert(id_j);
                }
            }
        }

        // Merging all the temporary indexes
        #pragma omp critical
        {
            for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
            }
        }
    }

    // Count the row sizes
    SizeType nnz = 0;
    for (IndexType i = 0; i < indices.size(); ++i)
        nnz += indices[i].size();

    rA = TSystemMatrixType(indices.size(), indices.size(), nnz);

    double* Avalues = rA.value_data().begin();
    std::size_t* Arow_indices = rA.index1_data().begin();
    std::size_t* Acol_indices = rA.index2_data().begin();

    // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
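    // (the row-pointer array is a running prefix sum of the per-row entry counts:
    //  Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(). Each entry depends on the
    //  previous one, which is why this loop has to stay sequential.)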
Arow_indices[0] = 0; for (IndexType i = 0; i < rA.size1(); ++i) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(rA.size1()); ++i) { const IndexType row_begin = Arow_indices[i]; const IndexType row_end = Arow_indices[i + 1]; IndexType k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); ++it) { Acol_indices[k] = *it; Avalues[k] = 0.0; ++k; } std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } rA.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } /** * @brief This method assembles the LHS of the system * @param rA The LHS to assemble * @param rLHSContribution The local LHS contribution * @param rEquationId The equation id */ void AssembleLHS( TSystemMatrixType& rA, LocalSystemMatrixType& rLHSContribution, EquationIdVectorType& rEquationId ) { const SizeType local_size = rLHSContribution.size1(); for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (IndexType j_local = 0; j_local < local_size; ++j_local) { const IndexType j_global = rEquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) { rA(i_global, j_global) += rLHSContribution(i_local, j_local); } } } } } /** * @brief This function is equivalent to the AssembleRowContribution of the block builder and solver * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are skipped */ inline void AssembleRowContributionFreeDofs( TSystemMatrixType& rA, const Matrix& rALocal, const IndexType i, const IndexType i_local, const Element::EquationIdVectorType& EquationId ) { double* values_vector = rA.value_data().begin(); IndexType* index1_vector = rA.index1_data().begin(); IndexType* index2_vector = rA.index2_data().begin(); const IndexType left_limit = index1_vector[i]; // Find the first entry // We iterate over the equation ids until we find the first equation id to be considered // We count in which component we find an ID IndexType last_pos = 0; IndexType last_found = 0; IndexType counter = 0; for(IndexType j=0; j < EquationId.size(); ++j) { ++counter; const IndexType j_global = EquationId[j]; if (j_global < BaseType::mEquationSystemSize) { last_pos = ForwardFind(j_global,left_limit,index2_vector); last_found = j_global; break; } } // If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. 
If the number is below means that at we have several dofs free to be considered if (counter <= EquationId.size()) { #ifndef USE_LOCKS_IN_ASSEMBLY double& r_a = values_vector[last_pos]; const double& v_a = rALocal(i_local,counter - 1); #pragma omp atomic r_a += v_a; #else values_vector[last_pos] += rALocal(i_local,counter - 1); #endif // Now find all of the other entries IndexType pos = 0; for(IndexType j = counter; j < EquationId.size(); ++j) { IndexType id_to_find = EquationId[j]; if (id_to_find < BaseType::mEquationSystemSize) { if(id_to_find > last_found) pos = ForwardFind(id_to_find,last_pos+1,index2_vector); else if(id_to_find < last_found) pos = BackwardFind(id_to_find,last_pos-1,index2_vector); else pos = last_pos; #ifndef USE_LOCKS_IN_ASSEMBLY double& r = values_vector[pos]; const double& v = rALocal(i_local,j); #pragma omp atomic r += v; #else values_vector[pos] += rALocal(i_local,j); #endif last_found = id_to_find; last_pos = pos; } } } } inline IndexType ForwardFind(const IndexType id_to_find, const IndexType start, const IndexType* index_vector) { IndexType pos = start; while(id_to_find != index_vector[pos]) pos++; return pos; } inline IndexType BackwardFind(const IndexType id_to_find, const IndexType start, const IndexType* index_vector) { IndexType pos = start; while(id_to_find != index_vector[pos]) pos--; return pos; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method ensures that the contribution is unique */ inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } /** * @brief This method assembles the RHS of the system * @param rb The RHS to assemble * @param rRHSContribution The local RHS contribution * @param rEquationId The equation id */ void AssembleRHS( TSystemVectorType& rb, const LocalSystemVectorType& rRHSContribution, const EquationIdVectorType& rEquationId ) { SizeType local_size = rRHSContribution.size(); if (BaseType::mCalculateReactionsFlag == false) { for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { // Free dof // ASSEMBLING THE SYSTEM VECTOR double& b_value = rb[i_global]; const double& rhs_value = rRHSContribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } else { TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { //free dof // ASSEMBLING THE SYSTEM VECTOR double& b_value = rb[i_global]; const double& rhs_value = rRHSContribution[i_local]; #pragma omp atomic b_value += rhs_value; } else { // Fixed dof double& b_value = r_reactions_vector[i_global - BaseType::mEquationSystemSize]; const double& rhs_value = rRHSContribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } } /** * @brief This method assembles the LHS of the system (on free rows) * @param rA The LHS to assemble * @param rLHSContribution The local LHS contribution * @param rEquationId The equation id */ void 
AssembleLHSCompleteOnFreeRows( TSystemMatrixType& rA, LocalSystemMatrixType& rLHSContribution, EquationIdVectorType& rEquationId ) { const SizeType local_size = rLHSContribution.size1(); for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (IndexType j_local = 0; j_local < local_size; ++j_local) { const IndexType j_global = rEquationId[j_local]; rA(i_global, j_global) += rLHSContribution(i_local, j_local); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedEliminationBuilderAndSolver */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
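// ---------------------------------------------------------------------------------------------
// A minimal, self-contained sketch of the elimination idea used by the builder and solver above:
// free DoFs are numbered first, fixed DoFs fill the tail of the numbering (see SetUpSystem), and
// the assembly simply skips any entry whose equation id is at or beyond the reduced system size.
// Plain std:: containers and a dense matrix are used instead of Kratos types; the names Dof,
// number_dofs and assemble_free are illustrative only and not part of the Kratos API.
// ---------------------------------------------------------------------------------------------

#include <cstddef>
#include <vector>

struct Dof {
    bool is_fixed = false;
    std::size_t equation_id = 0;
};

// Free DoFs get ids 0..n_free-1, fixed DoFs fill the tail in reverse order, mirroring
// SetUpSystem(); the return value plays the role of mEquationSystemSize.
inline std::size_t number_dofs(std::vector<Dof>& dofs)
{
    std::size_t free_id = 0;
    std::size_t fix_id = dofs.size();
    for (auto& dof : dofs) {
        if (dof.is_fixed) dof.equation_id = --fix_id;
        else              dof.equation_id = free_id++;
    }
    return fix_id; // == number of free DoFs
}

// Assembly of one local contribution: entries belonging to fixed DoFs are skipped entirely,
// which is what distinguishes the elimination builder from a block builder.
inline void assemble_free(std::vector<std::vector<double>>& A,
                          std::vector<double>& b,
                          const std::vector<std::vector<double>>& lhs_local,
                          const std::vector<double>& rhs_local,
                          const std::vector<std::size_t>& equation_ids,
                          const std::size_t system_size)
{
    for (std::size_t i = 0; i < equation_ids.size(); ++i) {
        const std::size_t gi = equation_ids[i];
        if (gi >= system_size) continue; // fixed DoF: no row in the reduced system
        b[gi] += rhs_local[i];
        for (std::size_t j = 0; j < equation_ids.size(); ++j) {
            const std::size_t gj = equation_ids[j];
            if (gj < system_size) A[gi][gj] += lhs_local[i][j]; // fixed columns are dropped too
        }
    }
}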
layerramdistancetransform.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2017-2020 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *********************************************************************************/ #ifndef IVW_LAYERRAMDISTANCETRANSFORM_H #define IVW_LAYERRAMDISTANCETRANSFORM_H #include <modules/base/basemoduledefine.h> #include <inviwo/core/common/inviwo.h> #include <inviwo/core/util/indexmapper.h> #include <inviwo/core/datastructures/image/layer.h> #include <inviwo/core/datastructures/image/layerram.h> #include <inviwo/core/datastructures/image/layerramprecision.h> #ifndef __clang__ #include <omp.h> #endif namespace inviwo { namespace util { /** * Implementation of Euclidean Distance Transform according to Saito's algorithm: * T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations * of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11). * pp. 1551-1565, 1994. * http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf * * Calculates the distance in base mat space * * Predicate is a function of type (const T &value) -> bool to deside if a value in the input * is a "feature". * * ValueTransform is a function of type (const U& squaredDist) -> U that is appiled to all * squared distance values at the end of the calculation. * * ProcessCallback is a function of type (double progress) -> void that is called with a value * from 0 to 1 to indicate the progress of the calculation. 
*/ template <typename T, typename U, typename Predicate, typename ValueTransform, typename ProgressCallback> void layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer, LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis, const size2_t upsample, Predicate predicate, ValueTransform valueTransform, ProgressCallback callback); template <typename T, typename U> void layerRAMDistanceTransform(const LayerRAMPrecision<T> *inVolume, LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis, const size2_t upsample); template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback> void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField, const size2_t upsample, Predicate predicate, ValueTransform valueTransform, ProgressCallback callback); template <typename U, typename ProgressCallback> void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField, const size2_t upsample, double threshold, bool normalize, bool flip, bool square, double scale, ProgressCallback callback); template <typename U> void layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField, const size2_t upsample, double threshold, bool normalize, bool flip, bool square, double scale); } // namespace util template <typename T, typename U, typename Predicate, typename ValueTransform, typename ProgressCallback> void util::layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer, LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis, const size2_t upsample, Predicate predicate, ValueTransform valueTransform, ProgressCallback callback) { #ifndef __clang__ omp_set_num_threads(std::thread::hardware_concurrency()); #endif using int64 = glm::int64; auto square = [](auto a) { return a * a; }; callback(0.0); const T *src = inLayer->getDataTyped(); U *dst = outDistanceField->getDataTyped(); const i64vec2 srcDim{inLayer->getDimensions()}; const i64vec2 dstDim{outDistanceField->getDimensions()}; const i64vec2 sm{upsample}; const auto squareBasis = glm::transpose(basis) * basis; const Vector<2, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1]}; const Vector<2, U> squareVoxelSize{squareBasisDiag / Vector<2, U>{dstDim * dstDim}}; const Vector<2, U> invSquareVoxelSize{Vector<2, U>{1.0f} / squareVoxelSize}; { const auto maxdist = glm::compMax(squareBasisDiag); bool orthogonal = true; for (size_t i = 0; i < squareBasis.length(); i++) { for (size_t j = 0; j < squareBasis.length(); j++) { if (i != j) { if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) { orthogonal = false; break; } } } } if (!orthogonal) { LogWarnCustom( "layerRAMDistanceTransform", "Calculating the distance transform on a non-orthogonal layer will not give " "correct values"); } } if (srcDim * sm != dstDim) { throw Exception( "DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) + " dst = " + toString(dstDim) + " scaling = " + toString(sm), IVW_CONTEXT_CUSTOM("layerRAMDistanceTransform")); } util::IndexMapper<2, int64> srcInd(srcDim); util::IndexMapper<2, int64> dstInd(dstDim); auto is_feature = [&](const int64 x, const int64 y) { return predicate(src[srcInd(x / sm.x, y / sm.y)]); }; // first pass, forward and backward scan along x // result: min distance in x direction #pragma omp parallel for for (int64 y = 0; y < dstDim.y; ++y) { // forward U dist = static_cast<U>(dstDim.x); for (int64 x = 0; x < dstDim.x; ++x) { if (!is_feature(x, y)) { ++dist; } else { dist = U(0); } dst[dstInd(x, y)] = 
squareVoxelSize.x * square(dist); } // backward dist = static_cast<U>(dstDim.x); for (int64 x = dstDim.x - 1; x >= 0; --x) { if (!is_feature(x, y)) { ++dist; } else { dist = U(0); } dst[dstInd(x, y)] = std::min<U>(dst[dstInd(x, y)], squareVoxelSize.x * square(dist)); } } // second pass, scan y direction // for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY // result: min distance in x and y direction callback(0.45); #pragma omp parallel { std::vector<U> buff; buff.resize(dstDim.y); #pragma omp for for (int64 x = 0; x < dstDim.x; ++x) { // cache column data into temporary buffer for (int64 y = 0; y < dstDim.y; ++y) { buff[y] = dst[dstInd(x, y)]; } for (int64 y = 0; y < dstDim.y; ++y) { auto d = buff[y]; if (d != U(0)) { const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1; const auto rStart = std::min(rMax, y - 1); const auto rEnd = std::min(rMax, dstDim.y - y); for (int64 n = -rStart; n < rEnd; ++n) { const auto w = buff[y + n] + squareVoxelSize.y * square(n); if (w < d) d = w; } } dst[dstInd(x, y)] = d; } } } // scale data callback(0.9); const int64 layerSize = dstDim.x * dstDim.y; #pragma omp parallel for for (int64 i = 0; i < layerSize; ++i) { dst[i] = valueTransform(dst[i]); } callback(1.0); } template <typename T, typename U> void util::layerRAMDistanceTransform(const LayerRAMPrecision<T> *inLayer, LayerRAMPrecision<U> *outDistanceField, const Matrix<2, U> basis, const size2_t upsample) { util::layerRAMDistanceTransform( inLayer, outDistanceField, basis, upsample, [](const T &val) { return util::glm_convert_normalized<double>(val) > 0.5; }, [](const U &squareDist) { return static_cast<U>(std::sqrt(static_cast<double>(squareDist))); }, [](double f) {}); } template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback> void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField, const size2_t upsample, Predicate predicate, ValueTransform valueTransform, ProgressCallback callback) { const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>(); inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) { layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, predicate, valueTransform, callback); }); } template <typename U, typename ProgressCallback> void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField, const size2_t upsample, double threshold, bool normalize, bool flip, bool square, double scale, ProgressCallback progress) { const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>(); inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) { using ValueType = util::PrecisionValueType<decltype(lrprecision)>; const auto predicateIn = [threshold](const ValueType &val) { return val < threshold; }; const auto predicateOut = [threshold](const ValueType &val) { return val > threshold; }; const auto normPredicateIn = [threshold](const ValueType &val) { return util::glm_convert_normalized<double>(val) < threshold; }; const auto normPredicateOut = [threshold](const ValueType &val) { return util::glm_convert_normalized<double>(val) > threshold; }; const auto valTransIdent = [scale](const float &squareDist) { return static_cast<float>(scale * squareDist); }; const auto valTransSqrt = [scale](const float &squareDist) { return static_cast<float>(scale * std::sqrt(squareDist)); }; if (normalize && square && flip) { 
util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, normPredicateIn, valTransIdent, progress); } else if (normalize && square && !flip) { util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, normPredicateOut, valTransIdent, progress); } else if (normalize && !square && flip) { util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, normPredicateIn, valTransSqrt, progress); } else if (normalize && !square && !flip) { util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, normPredicateOut, valTransSqrt, progress); } else if (!normalize && square && flip) { util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, predicateIn, valTransIdent, progress); } else if (!normalize && square && !flip) { util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, predicateOut, valTransIdent, progress); } else if (!normalize && !square && flip) { util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, predicateIn, valTransSqrt, progress); } else if (!normalize && !square && !flip) { util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample, predicateOut, valTransSqrt, progress); } }); } template <typename U> void util::layerDistanceTransform(const Layer *inLayer, LayerRAMPrecision<U> *outDistanceField, const size2_t upsample, double threshold, bool normalize, bool flip, bool square, double scale) { util::layerDistanceTransform(inLayer, outDistanceField, upsample, threshold, normalize, flip, square, scale, [](double) {}); } } // namespace inviwo #endif // IVW_LAYERRAMDISTANCETRANSFORM_H
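// ---------------------------------------------------------------------------------------------
// A simplified sketch of the two-pass squared Euclidean distance transform implemented by
// layerRAMDistanceTransform() above (Saito & Toriwaki, 1994): pass 1 scans each row forward and
// backward to get the squared distance to the nearest feature along x; pass 2 combines the row
// results column-wise as min_i(dist_x(x, i) + (y - i)^2). This sketch assumes unit pixel spacing,
// no upsampling and no anisotropy handling; squared_distance_transform is an illustrative name,
// not part of the Inviwo API.
// ---------------------------------------------------------------------------------------------

#include <algorithm>
#include <cstddef>
#include <vector>

inline std::vector<float> squared_distance_transform(const std::vector<char>& feature,
                                                     std::size_t w, std::size_t h)
{
    std::vector<float> dist(w * h); // every entry is written by the forward scan below

    // Pass 1: per row, forward and backward scan along x.
    for (std::size_t y = 0; y < h; ++y) {
        float d = static_cast<float>(w);
        for (std::size_t x = 0; x < w; ++x) {               // forward
            d = feature[y * w + x] ? 0.0f : d + 1.0f;
            dist[y * w + x] = d * d;
        }
        d = static_cast<float>(w);
        for (std::size_t x = w; x-- > 0;) {                 // backward
            d = feature[y * w + x] ? 0.0f : d + 1.0f;
            dist[y * w + x] = std::min(dist[y * w + x], d * d);
        }
    }

    // Pass 2: per column, take the minimum of dist_x(x, i) + (y - i)^2 over all rows i.
    // (The implementation above restricts the scan radius via rMax; omitted here for clarity.)
    std::vector<float> column(h);
    for (std::size_t x = 0; x < w; ++x) {
        for (std::size_t y = 0; y < h; ++y) column[y] = dist[y * w + x];
        for (std::size_t y = 0; y < h; ++y) {
            float best = column[y];
            for (std::size_t i = 0; i < h; ++i) {
                const float dy = static_cast<float>(y) - static_cast<float>(i);
                best = std::min(best, column[i] + dy * dy);
            }
            dist[y * w + x] = best;
        }
    }
    return dist; // squared distances; take std::sqrt for the Euclidean distance field
}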
deactivation_utility.h
/* ============================================================================== KratosStructuralApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel pooyan@cimne.upc.edu rrossi@cimne.upc.edu janosch.stascheit@rub.de nagel@sd.rub.de - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain - Ruhr-University Bochum, Institute for Structural Mechanics, Germany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: hurga $ // Date: $Date: 2009-02-26 13:49:42 $ // Revision: $Revision: 1.7 $ // // #if !defined(KRATOS_DEACTIVATION_UTILITY_INCLUDED ) #define KRATOS_DEACTIVATION_UTILITY_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> #include <fstream> #include <cmath> #if !defined(isnan) #define isnan(x) ((x)!=(x)) #endif // External includes // Project includes #include "includes/define.h" #include "processes/process.h" #include "includes/node.h" #include "includes/element.h" #include "includes/model_part.h" #include "includes/variables.h" #include "includes/kratos_flags.h" #include "utilities/openmp_utils.h" #include "structural_application.h" namespace Kratos { /** * Deactivation and reactivation of elements * This process handles deactivation and reactivation * of elements and associated conditions. * In order to use this process, a variable calles * ACTIVATION_LEVEL of type int has to be defined. * The following values can be assigned to ACTIVATION_LEVEL: * ACTIVATION_LEVEL == 0: element is always active * ACTIVATION_LEVEL > 0: element will be deactivated on demand * ACTIVATION_LEVEL < 0: element is initially deactivated and * will be reactivated on demand */ class DeactivationUtility { public: typedef PointerVectorSet<Element> ElementsArrayType; typedef PointerVectorSet<Condition> ConditionsArrayType; /** * class pointer definition */ KRATOS_CLASS_POINTER_DEFINITION( DeactivationUtility ); /** * Default Constructor. * Please note that reactivation of elements does only work * as long as the process that deactivated the elements before * is living. 
*/ DeactivationUtility() { mEchoLevel = 1; } /** * Constructor with echo level */ DeactivationUtility(int EchoLevel) { mEchoLevel = EchoLevel; } /** * Destructor. */ virtual ~DeactivationUtility() { } /** * Initializes all elements before performing any calculation. * This is done even for those elements that are deactivated * in the beginning of the simulation */ void Initialize( ModelPart& model_part ) { #ifndef _OPENMP if(mEchoLevel > 0) std::cout << "initializing deactivation utility" << std::endl; //initializing elements for ( ElementsArrayType::ptr_iterator it=model_part.Elements().ptr_begin(); it!=model_part.Elements().ptr_end(); ++it ) { if( (*it)->GetValue(ACTIVATION_LEVEL) < 0 ) { (*it)->Set(ACTIVE, false); } else { (*it)->Set(ACTIVE, true); } (*it)->Initialize(); } for ( ConditionsArrayType::ptr_iterator it=model_part.Conditions().ptr_begin(); it != model_part.Conditions().ptr_end(); ++it ) { if( (*it)->GetValue(IS_CONTACT_MASTER) || (*it)->GetValue(IS_CONTACT_SLAVE) ) { (*it)->SetValue(ACTIVATION_LEVEL, 0 ); } if( (*it)->GetValue(ACTIVATION_LEVEL) < 0 ) { (*it)->Set(ACTIVE, false); } else { (*it)->Set(ACTIVE, true); } (*it)->Initialize(); } #else if(mEchoLevel > 0) std::cout << "multithreaded initializing deactivation utility" << std::endl; int number_of_threads = omp_get_max_threads(); vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, model_part.Elements().size(), element_partition); vector<unsigned int> condition_partition; OpenMPUtils::CreatePartition(number_of_threads, model_part.Conditions().size(), condition_partition); #pragma omp parallel for for(int k = 0; k < number_of_threads; ++k) { ElementsArrayType::ptr_iterator it_begin = model_part.Elements().ptr_begin() + element_partition[k]; ElementsArrayType::ptr_iterator it_end = model_part.Elements().ptr_begin() + element_partition[k + 1]; for (ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { if( (*it)->GetValue(ACTIVATION_LEVEL) < 0 ) { (*it)->Set(ACTIVE, false); } else { (*it)->Set(ACTIVE, true); } // std::cout << "element of type " << typeid(*(*it)).name() << " Id = " << (*it)->Id() << " is going to be initialized" << std::endl; (*it)->Initialize(); } } #pragma omp parallel for for(int k = 0; k < number_of_threads; ++k) { ConditionsArrayType::ptr_iterator it_begin = model_part.Conditions().ptr_begin() + condition_partition[k]; ConditionsArrayType::ptr_iterator it_end = model_part.Conditions().ptr_begin() + condition_partition[k + 1]; for (ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { if( (*it)->GetValue(IS_CONTACT_MASTER) || (*it)->GetValue(IS_CONTACT_SLAVE) ) { (*it)->SetValue(ACTIVATION_LEVEL, 0 ); } if( (*it)->GetValue(ACTIVATION_LEVEL) < 0 ) { (*it)->Set(ACTIVE, false); } else { (*it)->Set(ACTIVE, true); } // std::cout << "condition of type " << typeid(*(*it)).name() << " Id = " << (*it)->Id() << " is going to be initialized" << std::endl; (*it)->Initialize(); } } #endif if(mEchoLevel > 0) std::cout << "deactivation utility initialized" << std::endl; } /** * Deactivates all elements and conditions marked with an * activation level in range (from_level, to_level). 
* Deactivated entities are stored in an intermediate * container and can be restored by calling Reactivate() or * ReactivateAll() */ void Deactivate( ModelPart& model_part, int from_level, int to_level ) { KRATOS_TRY; //first step: reactivate all elements and conditions ReactivateAll( model_part ); //second step: deactivate elements and conditions to be deactivated currently // identify elements to be deactivated for ( ElementsArrayType::ptr_iterator it=model_part.Elements().ptr_begin(); it!=model_part.Elements().ptr_end(); ++it) { //if element is to be deactivated if( ( (*it)->GetValue( ACTIVATION_LEVEL ) >= from_level && (*it)->GetValue( ACTIVATION_LEVEL ) <= to_level && (*it)->GetValue( ACTIVATION_LEVEL ) != 0 ) || ( (*it)->GetValue( ACTIVATION_LEVEL ) < 0 ) ) { (*it)->Set( ACTIVE, false ); } } for( ConditionsArrayType::ptr_iterator it = model_part.Conditions().ptr_begin(); it != model_part.Conditions().ptr_end(); ++it ) { // std::cout << "condition: " << (*it)->Id() << "has activation level: " << (*it)->GetValue( ACTIVATION_LEVEL ) << std::endl; if( ( (*it)->GetValue( ACTIVATION_LEVEL ) >= from_level && (*it)->GetValue( ACTIVATION_LEVEL ) <= to_level && (*it)->GetValue( ACTIVATION_LEVEL ) != 0 ) || ( (*it)->GetValue( ACTIVATION_LEVEL ) < 0 ) ) { if( !( (*it)->GetValue( IS_CONTACT_MASTER ) || (*it)->GetValue( IS_CONTACT_SLAVE ) ) ) { (*it)->Set( ACTIVE, false ); } } } KRATOS_CATCH("") } /** * Reactivates all elements and conditions stored in the * intermediate containers */ void ReactivateAll( ModelPart& model_part ) { for ( ElementsArrayType::ptr_iterator it=model_part.Elements().ptr_begin(); it!=model_part.Elements().ptr_end(); ++it) { (*it)->Set( ACTIVE, true ); } for( ConditionsArrayType::ptr_iterator it = model_part.Conditions().ptr_begin(); it != model_part.Conditions().ptr_end(); ++it ) { (*it)->Set( ACTIVE, true ); } } /** * reactivate all elements with activation level in range * (from_level, to_level) */ void Reactivate( ModelPart& model_part, int from_level, int to_level ) { KRATOS_TRY; for ( ElementsArrayType::ptr_iterator it=model_part.Elements().ptr_begin(); it!=model_part.Elements().ptr_end(); ++it) { //if element is to be reactivated if( (*it)->GetValue( ACTIVATION_LEVEL ) >= from_level && (*it)->GetValue( ACTIVATION_LEVEL ) <= to_level) { (*it)->SetValue(ACTIVATION_LEVEL, 0 ); } } for( ConditionsArrayType::ptr_iterator it = model_part.Conditions().ptr_begin(); it != model_part.Conditions().ptr_end(); ++it ) { if( ! ( (*it)->GetValue( IS_CONTACT_MASTER ) || (*it)->GetValue(IS_CONTACT_SLAVE) ) ) { if( (*it)->GetValue( ACTIVATION_LEVEL ) >= from_level && (*it)->GetValue(ACTIVATION_LEVEL) <= to_level) { (*it)->SetValue(ACTIVATION_LEVEL, 0); } } } KRATOS_CATCH(""); } /** * reactivate all elements with activation level in range * (from_level, to_level) */ void ReactivateStressFree( ModelPart& model_part, int from_level, int to_level ) { KRATOS_TRY; for ( ElementsArrayType::ptr_iterator it=model_part.Elements().ptr_begin(); it!=model_part.Elements().ptr_end(); ++it) { //if element is to be reactivated if( (*it)->GetValue( ACTIVATION_LEVEL ) >= from_level && (*it)->GetValue( ACTIVATION_LEVEL ) <= to_level) { (*it)->Initialize(); (*it)->SetValue(ACTIVATION_LEVEL, 0 ); } } for( ConditionsArrayType::ptr_iterator it = model_part.Conditions().ptr_begin(); it != model_part.Conditions().ptr_end(); ++it ) { if( ! 
( (*it)->GetValue( IS_CONTACT_MASTER ) || (*it)->GetValue(IS_CONTACT_SLAVE) ) ) { if( (*it)->GetValue( ACTIVATION_LEVEL ) >= from_level && (*it)->GetValue(ACTIVATION_LEVEL) <= to_level) { (*it)->SetValue(ACTIVATION_LEVEL, 0); } } } KRATOS_CATCH(""); } /// Get name from an abject template<class Object> std::string GetName(const Object& o) { return std::string(typeid(o).name()); } /** * Turn back information as a string. */ virtual std::string Info() const { return "DeactivationUtility"; } /** * Print information about this object. */ virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "DeactivationUtility"; } /** * Print object's data. */ virtual void PrintData(std::ostream& rOStream) const { } private: /** * Containers for deactivated elements and conditions */ PointerVector<Element> mDeactivatedElements; PointerVector<Condition> mDeactivatedConditions; int mEchoLevel; /** * Assignment operator */ //DeactivationUtility& operator=(DeactivationUtility const& rOther); /** * Copy constructor */ //DeactivationUtility(DeactivationUtility const& rOther); };//class DeactivationUtility } // namespace Kratos. #endif // KRATOS_DEACTIVATION_UTILITY_INCLUDED defined
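// ---------------------------------------------------------------------------------------------
// A short usage sketch for the utility above: elements and conditions carry an ACTIVATION_LEVEL
// value (0 = always active, > 0 = deactivated on demand, < 0 = initially deactivated), and the
// utility toggles the ACTIVE flag accordingly. Setting up the model part and its levels is
// assumed to happen elsewhere; run_excavation_stage is an illustrative name, not part of the
// utility itself.
// ---------------------------------------------------------------------------------------------

inline void run_excavation_stage(Kratos::ModelPart& r_model_part, int excavation_stage)
{
    Kratos::DeactivationUtility deactivation_util;

    // Initialize() is called once before the first calculation; it also calls Initialize()
    // on every element and condition, including the initially deactivated ones.
    deactivation_util.Initialize(r_model_part);

    // Deactivate everything whose ACTIVATION_LEVEL lies in [1, excavation_stage]; internally
    // this first reactivates all entities and then switches the selected ones (plus all
    // negative-level entities) to ACTIVE == false.
    deactivation_util.Deactivate(r_model_part, 1, excavation_stage);

    // ... solve the stage here ...

    // Entities in that level range can later be brought back, either keeping their state:
    // deactivation_util.Reactivate(r_model_part, 1, excavation_stage);
    // or re-initialized (stress free):
    // deactivation_util.ReactivateStressFree(r_model_part, 1, excavation_stage);
}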
convolution_winograd_dot_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_winograd_dot_pack8_fp16sa_neon(Mat& bottom_blob_tm, int outch, const Mat& kernel_tm, Mat& top_blob_tm, const Option& opt) { // Mat bottom_blob_tm(tiles, 16/36/64, inch, 16u, 8, opt.workspace_allocator); const int tiles = bottom_blob_tm.w; const int batch = bottom_blob_tm.h; const int inch = bottom_blob_tm.c; // permute Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, batch, 16u, 8, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, batch, 16u, 8, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, batch, 16u, 8, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, batch, 16u, 8, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, batch, 16u, 8, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < batch; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { __fp16* tm2p = tm2.row<__fp16>(i / 12); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 12x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n" "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n" "sub %0, %0, #128 \n" "uzp1 v20.8h, v0.8h, v4.8h \n" // 0 "uzp1 v21.8h, v16.8h, v1.8h \n" // 1 "uzp1 v22.8h, v5.8h, v17.8h \n" // 2 "uzp1 v23.8h, v2.8h, v6.8h \n" // 3 "uzp1 v24.8h, v18.8h, v3.8h \n" // 4 "uzp1 v25.8h, v7.8h, v19.8h \n" // 5 "uzp2 v26.8h, v0.8h, v4.8h \n" // 6 "uzp2 v27.8h, v16.8h, v1.8h \n" // 7 "uzp2 v28.8h, v5.8h, v17.8h \n" // 8 "uzp2 v29.8h, v2.8h, v6.8h \n" // 9 "uzp2 v30.8h, v18.8h, v3.8h \n" // 10 "uzp2 v31.8h, v7.8h, v19.8h \n" // 11 "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 
v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0", "v1"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tmpptr) // %1 : "0"(r0), "1"(tmpptr) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, batch, outch, 16u, 8, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < batch; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const __fp16* r0 = bb2.row<const __fp16>(i / 12); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123 "fmla v20.8h, v12.8h, v0.h[0] \n" "fmla v21.8h, v12.8h, v0.h[1] \n" "fmla v22.8h, v12.8h, v0.h[2] \n" "fmla v23.8h, v12.8h, v0.h[3] \n" "fmla v24.8h, v12.8h, v0.h[4] \n" "fmla v25.8h, v12.8h, v0.h[5] \n" "fmla v26.8h, v12.8h, v0.h[6] \n" "fmla v27.8h, v12.8h, v0.h[7] \n" "fmla v28.8h, v12.8h, v1.h[0] \n" "fmla v29.8h, v12.8h, v1.h[1] \n" "fmla v30.8h, v12.8h, v1.h[2] \n" "fmla v31.8h, v12.8h, v1.h[3] \n" "fmla v20.8h, v13.8h, v1.h[4] \n" 
"fmla v21.8h, v13.8h, v1.h[5] \n" "fmla v22.8h, v13.8h, v1.h[6] \n" "fmla v23.8h, v13.8h, v1.h[7] \n" "fmla v24.8h, v13.8h, v2.h[0] \n" "fmla v25.8h, v13.8h, v2.h[1] \n" "fmla v26.8h, v13.8h, v2.h[2] \n" "fmla v27.8h, v13.8h, v2.h[3] \n" "fmla v28.8h, v13.8h, v2.h[4] \n" "fmla v29.8h, v13.8h, v2.h[5] \n" "fmla v30.8h, v13.8h, v2.h[6] \n" "fmla v31.8h, v13.8h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v20.8h, v14.8h, v3.h[0] \n" "fmla v21.8h, v14.8h, v3.h[1] \n" "fmla v22.8h, v14.8h, v3.h[2] \n" "fmla v23.8h, v14.8h, v3.h[3] \n" "fmla v24.8h, v14.8h, v3.h[4] \n" "fmla v25.8h, v14.8h, v3.h[5] \n" "fmla v26.8h, v14.8h, v3.h[6] \n" "fmla v27.8h, v14.8h, v3.h[7] \n" "fmla v28.8h, v14.8h, v4.h[0] \n" "fmla v29.8h, v14.8h, v4.h[1] \n" "fmla v30.8h, v14.8h, v4.h[2] \n" "fmla v31.8h, v14.8h, v4.h[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567 "fmla v20.8h, v15.8h, v4.h[4] \n" "fmla v21.8h, v15.8h, v4.h[5] \n" "fmla v22.8h, v15.8h, v4.h[6] \n" "fmla v23.8h, v15.8h, v4.h[7] \n" "fmla v24.8h, v15.8h, v5.h[0] \n" "fmla v25.8h, v15.8h, v5.h[1] \n" "fmla v26.8h, v15.8h, v5.h[2] \n" "fmla v27.8h, v15.8h, v5.h[3] \n" "fmla v28.8h, v15.8h, v5.h[4] \n" "fmla v29.8h, v15.8h, v5.h[5] \n" "fmla v30.8h, v15.8h, v5.h[6] \n" "fmla v31.8h, v15.8h, v5.h[7] \n" "fmla v20.8h, v16.8h, v6.h[0] \n" "fmla v21.8h, v16.8h, v6.h[1] \n" "fmla v22.8h, v16.8h, v6.h[2] \n" "fmla v23.8h, v16.8h, v6.h[3] \n" "fmla v24.8h, v16.8h, v6.h[4] \n" "fmla v25.8h, v16.8h, v6.h[5] \n" "fmla v26.8h, v16.8h, v6.h[6] \n" "fmla v27.8h, v16.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v7.h[0] \n" "fmla v29.8h, v16.8h, v7.h[1] \n" "fmla v30.8h, v16.8h, v7.h[2] \n" "fmla v31.8h, v16.8h, v7.h[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011 "fmla v20.8h, v17.8h, v7.h[4] \n" "fmla v21.8h, v17.8h, v7.h[5] \n" "fmla v22.8h, v17.8h, v7.h[6] \n" "fmla v23.8h, v17.8h, v7.h[7] \n" "fmla v24.8h, v17.8h, v8.h[0] \n" "fmla v25.8h, v17.8h, v8.h[1] \n" "fmla v26.8h, v17.8h, v8.h[2] \n" "fmla v27.8h, v17.8h, v8.h[3] \n" "fmla v28.8h, v17.8h, v8.h[4] \n" "fmla v29.8h, v17.8h, v8.h[5] \n" "fmla v30.8h, v17.8h, v8.h[6] \n" "fmla v31.8h, v17.8h, v8.h[7] \n" "fmla v20.8h, v18.8h, v9.h[0] \n" "fmla v21.8h, v18.8h, v9.h[1] \n" "fmla v22.8h, v18.8h, v9.h[2] \n" "fmla v23.8h, v18.8h, v9.h[3] \n" "fmla v24.8h, v18.8h, v9.h[4] \n" "fmla v25.8h, v18.8h, v9.h[5] \n" "fmla v26.8h, v18.8h, v9.h[6] \n" "fmla v27.8h, v18.8h, v9.h[7] \n" "fmla v28.8h, v18.8h, v10.h[0] \n" "fmla v29.8h, v18.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v10.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.8h, v19.8h, v10.h[4] \n" "fmla v21.8h, v19.8h, v10.h[5] \n" "fmla v22.8h, v19.8h, v10.h[6] \n" "fmla v23.8h, v19.8h, v10.h[7] \n" "fmla v24.8h, v19.8h, v11.h[0] \n" "fmla v25.8h, v19.8h, v11.h[1] \n" "fmla v26.8h, v19.8h, v11.h[2] \n" "fmla v27.8h, v19.8h, v11.h[3] \n" "fmla v28.8h, v19.8h, v11.h[4] \n" "fmla v29.8h, v19.8h, v11.h[5] \n" "fmla v30.8h, v19.8h, v11.h[6] \n" "fmla v31.8h, v19.8h, v11.h[7] \n" "bne 0b \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", 
"v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v0.h[1] \n" "fmla v18.8h, v8.8h, v0.h[2] \n" "fmla v19.8h, v8.8h, v0.h[3] \n" "fmla v20.8h, v8.8h, v0.h[4] \n" "fmla v21.8h, v8.8h, v0.h[5] \n" "fmla v22.8h, v8.8h, v0.h[6] \n" "fmla v23.8h, v8.8h, v0.h[7] \n" "fmla v16.8h, v9.8h, v1.h[0] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v1.h[2] \n" "fmla v19.8h, v9.8h, v1.h[3] \n" "fmla v20.8h, v9.8h, v1.h[4] \n" "fmla v21.8h, v9.8h, v1.h[5] \n" "fmla v22.8h, v9.8h, v1.h[6] \n" "fmla v23.8h, v9.8h, v1.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v16.8h, v10.8h, v2.h[0] \n" "fmla v17.8h, v10.8h, v2.h[1] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v2.h[3] \n" "fmla v20.8h, v10.8h, v2.h[4] \n" "fmla v21.8h, v10.8h, v2.h[5] \n" "fmla v22.8h, v10.8h, v2.h[6] \n" "fmla v23.8h, v10.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v11.8h, v3.h[0] \n" "fmla v17.8h, v11.8h, v3.h[1] \n" "fmla v18.8h, v11.8h, v3.h[2] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v20.8h, v11.8h, v3.h[4] \n" "fmla v21.8h, v11.8h, v3.h[5] \n" "fmla v22.8h, v11.8h, v3.h[6] \n" "fmla v23.8h, v11.8h, v3.h[7] \n" "fmla v16.8h, v12.8h, v4.h[0] \n" "fmla v17.8h, v12.8h, v4.h[1] \n" "fmla v18.8h, v12.8h, v4.h[2] \n" "fmla v19.8h, v12.8h, v4.h[3] \n" "fmla v20.8h, v12.8h, v4.h[4] \n" "fmla v21.8h, v12.8h, v4.h[5] \n" "fmla v22.8h, v12.8h, v4.h[6] \n" "fmla v23.8h, v12.8h, v4.h[7] \n" "fmla v16.8h, v13.8h, v5.h[0] \n" "fmla v17.8h, v13.8h, v5.h[1] \n" "fmla v18.8h, v13.8h, v5.h[2] \n" "fmla v19.8h, v13.8h, v5.h[3] \n" "fmla v20.8h, v13.8h, v5.h[4] \n" "fmla v21.8h, v13.8h, v5.h[5] \n" "fmla v22.8h, v13.8h, v5.h[6] \n" "fmla v23.8h, v13.8h, v5.h[7] \n" "fmla v16.8h, v14.8h, v6.h[0] \n" "fmla v17.8h, v14.8h, v6.h[1] \n" "fmla v18.8h, v14.8h, v6.h[2] \n" "fmla v19.8h, v14.8h, v6.h[3] \n" "fmla v20.8h, v14.8h, v6.h[4] \n" "fmla v21.8h, v14.8h, v6.h[5] \n" "fmla v22.8h, v14.8h, v6.h[6] \n" "fmla v23.8h, v14.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v7.h[0] \n" "fmla v17.8h, v15.8h, v7.h[1] \n" "fmla v18.8h, v15.8h, v7.h[2] \n" "fmla v19.8h, v15.8h, v7.h[3] \n" "fmla v20.8h, v15.8h, v7.h[4] \n" "fmla v21.8h, v15.8h, v7.h[5] \n" "fmla v22.8h, v15.8h, v7.h[6] \n" "fmla v23.8h, v15.8h, v7.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); 
} for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v18.8h, v8.8h, v2.h[0] \n" "fmla v19.8h, v8.8h, v3.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v2.h[1] \n" "fmla v19.8h, v9.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v3.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v18.8h, v11.8h, v2.h[3] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v18.8h, v12.8h, v2.h[4] \n" "fmla v19.8h, v12.8h, v3.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v18.8h, v13.8h, v2.h[5] \n" "fmla v19.8h, v13.8h, v3.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "fmla v18.8h, v14.8h, v2.h[6] \n" "fmla v19.8h, v14.8h, v3.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "fmla v18.8h, v15.8h, v2.h[7] \n" "fmla v19.8h, v15.8h, v3.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i + 1 < tiles; i += 2) { const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 
12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const __fp16* k0 = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.8h}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "bne 0b \n" "st1 {v16.8h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"); } } } }
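// Hedged sketch (not part of ncnn): the tile -> packed-row mapping used by the
// tm2.row<__fp16>() and bb2.row<const __fp16>() calls above, written out as a
// plain helper.  Tiles are consumed in blocks of 12, then 8, 4, 2 and 1; for a
// block-start tile index i (as produced by the loops above) this expression
// gives the destination row inside bottom_blob_tm2.  The name is illustrative.
static inline int winograd_tile12_row_index(int i)
{
    return i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2;
}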
sparse_msg_restrict.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ #include "_hypre_struct_ls.h" /*-------------------------------------------------------------------------- * hypre_SparseMSGRestrictData data structure *--------------------------------------------------------------------------*/ typedef struct { hypre_StructMatrix *R; hypre_ComputePkg *compute_pkg; hypre_Index cindex; hypre_Index stride; hypre_Index strideR; HYPRE_Int time_index; } hypre_SparseMSGRestrictData; /*-------------------------------------------------------------------------- * hypre_SparseMSGRestrictCreate *--------------------------------------------------------------------------*/ void * hypre_SparseMSGRestrictCreate( ) { hypre_SparseMSGRestrictData *restrict_data; restrict_data = hypre_CTAlloc(hypre_SparseMSGRestrictData, 1); (restrict_data -> time_index) = hypre_InitializeTiming("SparseMSGRestrict"); return (void *) restrict_data; } /*-------------------------------------------------------------------------- * hypre_SparseMSGRestrictSetup *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SparseMSGRestrictSetup( void *restrict_vdata, hypre_StructMatrix *R, hypre_StructVector *r, hypre_StructVector *rc, hypre_Index cindex, hypre_Index findex, hypre_Index stride, hypre_Index strideR ) { hypre_SparseMSGRestrictData *restrict_data = (hypre_SparseMSGRestrictData *)restrict_vdata; hypre_StructGrid *grid; hypre_StructStencil *stencil; hypre_ComputeInfo *compute_info; hypre_ComputePkg *compute_pkg; HYPRE_Int ierr = 0; /*---------------------------------------------------------- * Set up the compute package *----------------------------------------------------------*/ grid = hypre_StructVectorGrid(r); stencil = hypre_StructMatrixStencil(R); hypre_CreateComputeInfo(grid, stencil, &compute_info); hypre_ComputeInfoProjectSend(compute_info, findex, stride); hypre_ComputeInfoProjectRecv(compute_info, findex, stride); hypre_ComputeInfoProjectComp(compute_info, cindex, stride); hypre_ComputePkgCreate(compute_info, hypre_StructVectorDataSpace(r), 1, grid, &compute_pkg); /*---------------------------------------------------------- * Set up the restrict data structure *----------------------------------------------------------*/ (restrict_data -> R) = hypre_StructMatrixRef(R); (restrict_data -> compute_pkg) = compute_pkg; hypre_CopyIndex(cindex ,(restrict_data -> cindex)); hypre_CopyIndex(stride ,(restrict_data -> stride)); hypre_CopyIndex(strideR ,(restrict_data -> strideR)); return ierr; } /*-------------------------------------------------------------------------- * hypre_SparseMSGRestrict: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SparseMSGRestrict( void *restrict_vdata, hypre_StructMatrix *R, hypre_StructVector *r, hypre_StructVector *rc ) { HYPRE_Int ierr = 0; hypre_SparseMSGRestrictData *restrict_data = (hypre_SparseMSGRestrictData *)restrict_vdata; hypre_ComputePkg *compute_pkg; hypre_IndexRef cindex; hypre_IndexRef stride; 
hypre_IndexRef strideR; hypre_StructGrid *fgrid; HYPRE_Int *fgrid_ids; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; HYPRE_Int *cgrid_ids; hypre_CommHandle *comm_handle; hypre_BoxArrayArray *compute_box_aa; hypre_BoxArray *compute_box_a; hypre_Box *compute_box; hypre_Box *R_dbox; hypre_Box *r_dbox; hypre_Box *rc_dbox; HYPRE_Int Ri; HYPRE_Int ri; HYPRE_Int rci; HYPRE_Real *Rp0, *Rp1; HYPRE_Real *rp, *rp0, *rp1; HYPRE_Real *rcp; hypre_Index loop_size; hypre_IndexRef start; hypre_Index startc; hypre_Index startR; hypre_Index stridec; hypre_StructStencil *stencil; hypre_Index *stencil_shape; HYPRE_Int compute_i, fi, ci, j; /*----------------------------------------------------------------------- * Initialize some things. *-----------------------------------------------------------------------*/ hypre_BeginTiming(restrict_data -> time_index); compute_pkg = (restrict_data -> compute_pkg); cindex = (restrict_data -> cindex); stride = (restrict_data -> stride); strideR = (restrict_data -> strideR); stencil = hypre_StructMatrixStencil(R); stencil_shape = hypre_StructStencilShape(stencil); hypre_SetIndex3(stridec, 1, 1, 1); /*-------------------------------------------------------------------- * Restrict the residual. *--------------------------------------------------------------------*/ fgrid = hypre_StructVectorGrid(r); fgrid_ids = hypre_StructGridIDs(fgrid); cgrid = hypre_StructVectorGrid(rc); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_ids = hypre_StructGridIDs(cgrid); for (compute_i = 0; compute_i < 2; compute_i++) { switch(compute_i) { case 0: { rp = hypre_StructVectorData(r); hypre_InitializeIndtComputations(compute_pkg, rp, &comm_handle); compute_box_aa = hypre_ComputePkgIndtBoxes(compute_pkg); } break; case 1: { hypre_FinalizeIndtComputations(comm_handle); compute_box_aa = hypre_ComputePkgDeptBoxes(compute_pkg); } break; } fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } compute_box_a = hypre_BoxArrayArrayBoxArray(compute_box_aa, fi); R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi); r_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(r), fi); rc_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(rc), ci); Rp0 = hypre_StructMatrixBoxData(R, fi, 1) - hypre_BoxOffsetDistance(R_dbox, stencil_shape[1]); Rp1 = hypre_StructMatrixBoxData(R, fi, 0); rp = hypre_StructVectorBoxData(r, fi); rp0 = rp + hypre_BoxOffsetDistance(r_dbox, stencil_shape[0]); rp1 = rp + hypre_BoxOffsetDistance(r_dbox, stencil_shape[1]); rcp = hypre_StructVectorBoxData(rc, ci); hypre_ForBoxI(j, compute_box_a) { compute_box = hypre_BoxArrayBox(compute_box_a, j); start = hypre_BoxIMin(compute_box); hypre_StructMapFineToCoarse(start, cindex, stride, startc); hypre_StructMapCoarseToFine(startc, cindex, strideR, startR); hypre_BoxGetStrideSize(compute_box, stride, loop_size); hypre_BoxLoop3Begin(hypre_StructMatrixNDim(R), loop_size, R_dbox, startR, strideR, Ri, r_dbox, start, stride, ri, rc_dbox, startc, stridec, rci); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,Ri,ri,rci) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(Ri, ri, rci) { rcp[rci] = rp[ri] + (Rp0[Ri] * rp0[ri] + Rp1[Ri] * rp1[ri]); } hypre_BoxLoop3End(Ri, ri, rci); } } } /*----------------------------------------------------------------------- * Return *-----------------------------------------------------------------------*/ hypre_IncFLOPCount(4*hypre_StructVectorGlobalSize(rc)); hypre_EndTiming(restrict_data -> time_index); return ierr; } 
/*-------------------------------------------------------------------------- * hypre_SparseMSGRestrictDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SparseMSGRestrictDestroy( void *restrict_vdata ) { HYPRE_Int ierr = 0; hypre_SparseMSGRestrictData *restrict_data = (hypre_SparseMSGRestrictData *)restrict_vdata; if (restrict_data) { hypre_StructMatrixDestroy(restrict_data -> R); hypre_ComputePkgDestroy(restrict_data -> compute_pkg); hypre_FinalizeTiming(restrict_data -> time_index); hypre_TFree(restrict_data); } return ierr; }
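/*--------------------------------------------------------------------------
 * Hedged usage sketch (not part of HYPRE): the intended create/setup/apply/
 * destroy sequence for the routines above.  R, r, rc and the index arguments
 * are assumed to be built elsewhere by the SparseMSG solver; the function
 * name below is illustrative only.
 *--------------------------------------------------------------------------*/

static HYPRE_Int
example_sparse_msg_restrict_cycle( hypre_StructMatrix *R,
                                   hypre_StructVector *r,
                                   hypre_StructVector *rc,
                                   hypre_Index         cindex,
                                   hypre_Index         findex,
                                   hypre_Index         stride,
                                   hypre_Index         strideR )
{
   void *restrict_data = hypre_SparseMSGRestrictCreate();

   hypre_SparseMSGRestrictSetup(restrict_data, R, r, rc,
                                cindex, findex, stride, strideR);
   hypre_SparseMSGRestrict(restrict_data, R, r, rc); /* rc <- R-weighted restriction of r */

   return hypre_SparseMSGRestrictDestroy(restrict_data);
}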
GB_unop__identity_uint64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_fp64) // op(A') function: GB (_unop_tran__identity_uint64_fp64) // C type: uint64_t // A type: double // cast: uint64_t cij = GB_cast_to_uint64_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_fp64) ( uint64_t *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
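//------------------------------------------------------------------------------
// Hedged sketch (not generated code): the dense (Ab == NULL) branch of
// GB (_unop_apply__identity_uint64_fp64) reduces to the loop below once the
// macros are expanded; GB_cast_to_uint64_t is the typecast helper named in the
// "cast:" comment above.  The function name here is illustrative only.
//------------------------------------------------------------------------------

void identity_uint64_fp64_reference (uint64_t *Cx, const double *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        Cx [p] = GB_cast_to_uint64_t ((double) (aij)) ;
    }
}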
fused_rowwise_nbitfake_conversion_ops.h
#pragma once #ifdef _OPENMP #include <omp.h> #endif #include "caffe2/core/context.h" #include "caffe2/core/logging.h" #include "caffe2/core/operator.h" #include "caffe2/operators/reducer_functors.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace internal { inline bool is_little_endian() { constexpr std::int32_t kValue = 1; return reinterpret_cast<const std::uint8_t*>(&kValue)[0] == 1; } void convertfp32fp32(float* dst, const float* src, size_t N); void convertfp16fp32(float* dst, const at::Half* src, size_t N); /** * @params Xmin initial solution passed and potentiall better solution returns * @params Xmax initial solution passed and potentiall better solution returns */ void param_search_greedy( const float* X, int N, const int n_bins, // = 200, const float ratio, // = 0.16, float& Xmin, float& Xmax, int bit_rate); } // namespace internal // Fake 2/4 bit quantization // Creates a 2/4bit rowwise quantized blob with scales and biases in fp16 // The storage format is 8 bit rowwise with scales and biases in fp32 template < int BIT_RATE, typename T, void (*convert)(float* dst, const T* src, size_t N), bool GREEDY = false> class FloatToFusedNBitFakeRowwiseQuantizedOp final : public Operator<CPUContext> { public: FloatToFusedNBitFakeRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws) : Operator<CPUContext>(def, ws) {} ~FloatToFusedNBitFakeRowwiseQuantizedOp() override {} bool RunOnDevice() override { CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness"); const auto& input = Input(DATA_FLOAT); const auto input_rows = input.size(0); const auto input_columns = input.size(1); CAFFE_ENFORCE_EQ(input.dim(), 2, "Expect input to be a matrix"); const std::vector<int64_t> output_dimensions = {input_rows, input_columns + 8}; auto* output = Output( DATA_FUSED_SCALE_BIAS_INT8, output_dimensions, at::dtype<uint8_t>()); const auto* input_data = input.template data<T>(); auto* output_data = output->template mutable_data<uint8_t>(); const auto output_columns = output->size(1); if (!std::is_same<T, float>::value && !std::is_same<T, at::Half>::value) { CAFFE_THROW("Unsupported data type"); } #ifdef _OPENMP vector<float> tmp_vec(input_columns * (GREEDY ? omp_get_max_threads() : 1)); #else vector<float> tmp_vec(input_columns); #endif #pragma omp parallel for if (GREEDY) for (int row = 0; row < input_rows; ++row) { float* tmp = tmp_vec.data(); #ifdef _OPENMP if (GREEDY) { tmp = &tmp_vec[omp_get_thread_num() * input_columns]; } #endif convert(tmp, input_data + row * input_columns, input_columns); uint8_t* output_row = output_data + row * output_columns; float* output_row_scale_bias = reinterpret_cast<float*>(output_row + input_columns); float minimum_element = *std::min_element(tmp, tmp + input_columns); float maximum_element = *std::max_element(tmp, tmp + input_columns); if (GREEDY) { internal::param_search_greedy( tmp, input_columns, 200, 0.16, minimum_element, maximum_element, BIT_RATE); } minimum_element = static_cast<at::Half>(minimum_element); const float range = maximum_element - minimum_element; const float scale = range == 0 ? 
1.0f : static_cast<float>(static_cast<at::Half>( range / static_cast<float>((1 << BIT_RATE) - 1))); const float inverse_scale = 1.0f / scale; output_row_scale_bias[0] = scale; output_row_scale_bias[1] = minimum_element; // NOLINTNEXTLINE(clang-diagnostic-sign-compare) for (size_t col = 0; col < input_columns; ++col) { output_row[col] = std::max( 0, std::min<int>( std::lrintf((tmp[col] - minimum_element) * inverse_scale), (1 << BIT_RATE) - 1)); } } return true; } private: INPUT_TAGS(DATA_FLOAT); // INT8 suffix because this is a fake quantization operator whose output // type is always 8-bit regardless of BIT_RATE. OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS_INT8); }; } // namespace caffe2
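// Hedged sketch (not part of Caffe2): the per-row math performed by
// RunOnDevice() above, for a plain float row.  Unlike the operator, this
// sketch keeps scale and bias in float instead of round-tripping them through
// at::Half, so outputs can differ slightly; the function name is illustrative.

#include <algorithm>
#include <cmath>
#include <cstdint>

inline void fake_rowwise_quantize_row(
    const float* row,
    int columns,
    int bit_rate,
    uint8_t* out, // columns bytes
    float* scale_out,
    float* bias_out) {
  const float minimum = *std::min_element(row, row + columns);
  const float maximum = *std::max_element(row, row + columns);
  const float range = maximum - minimum;
  const float scale =
      range == 0 ? 1.0f : range / static_cast<float>((1 << bit_rate) - 1);
  const float inverse_scale = 1.0f / scale;
  for (int col = 0; col < columns; ++col) {
    out[col] = static_cast<uint8_t>(std::max(
        0,
        std::min<int>(
            std::lrintf((row[col] - minimum) * inverse_scale),
            (1 << bit_rate) - 1)));
  }
  *scale_out = scale;
  *bias_out = minimum;
}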
DRB017-outputdep-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The loop in this example cannot be parallelized. Data race pairs: we allow two pairs to preserve the original code pattern. 1. x@71:12 vs. x@72:5 2. x@72:5 vs. x@72:5 */ #include "omprace.h" #include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char* argv[]) { omprace_init(); int len=100; if (argc>1) len = atoi(argv[1]); int a[len]; int i,x=10; #pragma omp parallel for for (i=0;i<len;i++) { a[i] = x; x=i; } printf("x=%d, a[0]=%d\n",x,a[0]); omprace_fini(); return 0; }
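/* Hedged sketch (not part of DataRaceBench): run sequentially, the racy loop
   above produces a[0] = 10, a[i] = i - 1 for i > 0, and leaves x = len - 1.
   One way to reproduce those values without the conflicting accesses to the
   shared x is to compute every element independently; the function name is
   illustrative only. */
void fill_without_race(int len, int a[])
{
#pragma omp parallel for
  for (int i = 0; i < len; i++)
    a[i] = (i == 0) ? 10 : i - 1;
  /* the sequential loop would end with x == len - 1 */
}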
bicubic_interpolation.h
// This program is free software: you can use, modify and/or redistribute it // under the terms of the simplified BSD License. You should have received a // copy of this license along this program. If not, see // <http://www.opensource.org/licenses/bsd-license.html>. // // Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es> // All rights reserved. #ifndef BICUBIC_INTERPOLATION_H #define BICUBIC_INTERPOLATION_H #define BOUNDARY_CONDITION 0 //0 Neumann //1 Periodic //2 Symmetric /** * * Neumann boundary condition test * **/ int neumann_bc (int x, int nx, bool & out) { if (x < 0) { x = 0; out = true; } else if (x >= nx) { x = nx - 1; out = true; } return x; } /** * * Periodic boundary condition test * **/ int periodic_bc (int x, int nx, bool & out) { if (x < 0) { const int n = 1 - (int) (x / (nx + 1)); const int ixx = x + n * nx; x = ixx % nx; out = true; } else if (x >= nx) { x = x % nx; out = true; } return x; } /** * * Symmetric boundary condition test * **/ int symmetric_bc (int x, int nx, bool & out) { if (x < 0) { const int borde = nx - 1; const int xx = -x; const int n = (int) (xx / borde) % 2; if (n) x = borde - (xx % borde); else x = xx % borde; out = true; } else if (x >= nx) { const int borde = nx - 1; const int n = (int) (x / borde) % 2; if (n) x = borde - (x % borde); else x = x % borde; out = true; } return x; } /** * * Bicubic interpolation in one dimension * **/ double cubic_interpolation (double v[4], //interpolation points double x //point to be interpolated ) { return v[1] + 0.5 * x * (v[2] - v[0] + x * (2.0 * v[0] - 5.0 * v[1] + 4.0 * v[2] - v[3] + x * (3.0 * (v[1] - v[2]) + v[3] - v[0]))); } /** * * Bicubic interpolation in two dimension * **/ double bicubic_interpolation (double p[4][4], //array containing the interpolation points double x, //x position to be interpolated double y //y position to be interpolated ) { double v[4]; v[0] = cubic_interpolation (p[0], y); v[1] = cubic_interpolation (p[1], y); v[2] = cubic_interpolation (p[2], y); v[3] = cubic_interpolation (p[3], y); return cubic_interpolation (v, x); } /** * * Compute the bicubic interpolation of a point in an image. * Detects if the point goes outside the image domain * **/ float bicubic_interpolation (const float *input, //image to be interpolated const float uu, //x component of the vector field const float vv, //y component of the vector field const int nx, //width of the image const int ny, //height of the image const bool border_out = false //if true, put zeros outside the region ) { const int sx = (uu < 0) ? -1 : 1; const int sy = (vv < 0) ? 
-1 : 1; int x, y, mx, my, dx, dy, ddx, ddy; bool out = false; switch (BOUNDARY_CONDITION) { case 0: x = neumann_bc ((int) uu, nx, out); y = neumann_bc ((int) vv, ny, out); mx = neumann_bc ((int) uu - sx, nx, out); my = neumann_bc ((int) vv - sx, ny, out); dx = neumann_bc ((int) uu + sx, nx, out); dy = neumann_bc ((int) vv + sy, ny, out); ddx = neumann_bc ((int) uu + 2 * sx, nx, out); ddy = neumann_bc ((int) vv + 2 * sy, ny, out); break; case 1: x = periodic_bc ((int) uu, nx, out); y = periodic_bc ((int) vv, ny, out); mx = periodic_bc ((int) uu - sx, nx, out); my = periodic_bc ((int) vv - sx, ny, out); dx = periodic_bc ((int) uu + sx, nx, out); dy = periodic_bc ((int) vv + sy, ny, out); ddx = periodic_bc ((int) uu + 2 * sx, nx, out); ddy = periodic_bc ((int) vv + 2 * sy, ny, out); break; case 2: x = symmetric_bc ((int) uu, nx, out); y = symmetric_bc ((int) vv, ny, out); mx = symmetric_bc ((int) uu - sx, nx, out); my = symmetric_bc ((int) vv - sx, ny, out); dx = symmetric_bc ((int) uu + sx, nx, out); dy = symmetric_bc ((int) vv + sy, ny, out); ddx = symmetric_bc ((int) uu + 2 * sx, nx, out); ddy = symmetric_bc ((int) vv + 2 * sy, ny, out); break; default: x = neumann_bc ((int) uu, nx, out); y = neumann_bc ((int) vv, ny, out); mx = neumann_bc ((int) uu - sx, nx, out); my = neumann_bc ((int) vv - sx, ny, out); dx = neumann_bc ((int) uu + sx, nx, out); dy = neumann_bc ((int) vv + sy, ny, out); ddx = neumann_bc ((int) uu + 2 * sx, nx, out); ddy = neumann_bc ((int) vv + 2 * sy, ny, out); break; } if (out && border_out) return 0.0; else { //obtain the interpolation points of the image const float p11 = input[mx + nx * my]; const float p12 = input[x + nx * my]; const float p13 = input[dx + nx * my]; const float p14 = input[ddx + nx * my]; const float p21 = input[mx + nx * y]; const float p22 = input[x + nx * y]; const float p23 = input[dx + nx * y]; const float p24 = input[ddx + nx * y]; const float p31 = input[mx + nx * dy]; const float p32 = input[x + nx * dy]; const float p33 = input[dx + nx * dy]; const float p34 = input[ddx + nx * dy]; const float p41 = input[mx + nx * ddy]; const float p42 = input[x + nx * ddy]; const float p43 = input[dx + nx * ddy]; const float p44 = input[ddx + nx * ddy]; //create array double pol[4][4] = { {p11, p21, p31, p41}, {p12, p22, p32, p42}, {p13, p23, p33, p43}, {p14, p24, p34, p44} }; //return interpolation return bicubic_interpolation (pol, (float) uu - x, (float) vv - y); } } /** * * Compute the bicubic interpolation of an image. * **/ void bicubic_interpolation (const float *input, //image to be warped const float *u, //x component of the vector field const float *v, //y component of the vector field float *output, //warped output image with bicubic interpolation const int nx, //width of the image const int ny, //height of the image bool border_out = false //if true, put zeros outside the region ) { #pragma omp parallel for for (int i = 0; i < ny; i++) for (int j = 0; j < nx; j++) { const int p = i * nx + j; const float uu = (float) (j + u[p]); const float vv = (float) (i + v[p]); //obtain the bicubic interpolation at position (uu, vv) output[p] = bicubic_interpolation (input, uu, vv, nx, ny, border_out); } } #endif
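// Hedged sketch (not part of the original header): cubic_interpolation() above
// is the Catmull-Rom kernel, so it reproduces the two central samples exactly
// at x = 0 and x = 1.  A tiny self-check, intended for a test translation unit
// that includes this header; the function name is illustrative only.
inline bool cubic_kernel_reproduces_samples()
{
  double v[4] = {1.0, 2.0, 3.0, 4.0};
  return cubic_interpolation(v, 0.0) == 2.0   // x = 0 -> v[1]
      && cubic_interpolation(v, 1.0) == 3.0;  // x = 1 -> v[2]
}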
GB_unaryop__ainv_uint16_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint16_uint8 // op(A') function: GB_tran__ainv_uint16_uint8 // C type: uint16_t // A type: uint8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint16_uint8 ( uint16_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint16_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
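//------------------------------------------------------------------------------
// Hedged sketch (not generated code): with the macros above expanded, each
// iteration of GB_unop__ainv_uint16_uint8 casts the uint8_t entry to uint16_t
// and takes its additive inverse, which wraps modulo 2^16.  The function name
// below is illustrative only.
//------------------------------------------------------------------------------

void ainv_uint16_uint8_reference (uint16_t *Cx, const uint8_t *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint16_t x = (uint16_t) Ax [p] ;
        Cx [p] = (uint16_t) (-x) ;
    }
}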
nanort.h
// // NanoRT, single header only modern ray tracing kernel. // // // Notes : The number of primitives are up to 2G. If you want to render large // data, please split data into chunks(~ 2G prims) and use NanoSG scene graph // library(`${nanort}/examples/nanosg`). // /* The MIT License (MIT) Copyright (c) 2015 - 2018 Light Transport Entertainment, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef NANORT_H_ #define NANORT_H_ #include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <cstdlib> #include <cstring> #include <functional> #include <limits> #include <memory> #include <queue> #include <string> #include <vector> // compiler macros // // NANORT_USE_CPP11_FEATURE : Enable C++11 feature // NANORT_ENABLE_PARALLEL_BUILD : Enable parallel BVH build. // NANORT_ENABLE_SERIALIZATION : Enable serialization feature for built BVH. // // Parallelized BVH build is supported on C++11 thread version. // OpenMP version is not fully tested. // thus turn off if you face a problem when building BVH in parallel. // #define NANORT_ENABLE_PARALLEL_BUILD // Some constants #define kNANORT_MIN_PRIMITIVES_FOR_PARALLEL_BUILD (1024 * 8) #define kNANORT_SHALLOW_DEPTH (4) // will create 2**N subtrees #ifdef NANORT_USE_CPP11_FEATURE // Assume C++11 compiler has thread support. // In some situation(e.g. embedded system, JIT compilation), thread feature // may not be available though... #include <atomic> #include <mutex> #include <thread> #define kNANORT_MAX_THREADS (256) // Parallel build should work well for C++11 version, thus force enable it. #ifndef NANORT_ENABLE_PARALLEL_BUILD #define NANORT_ENABLE_PARALLEL_BUILD #endif #endif namespace nanort { // RayType typedef enum { RAY_TYPE_NONE = 0x0, RAY_TYPE_PRIMARY = 0x1, RAY_TYPE_SECONDARY = 0x2, RAY_TYPE_DIFFUSE = 0x4, RAY_TYPE_REFLECTION = 0x8, RAY_TYPE_REFRACTION = 0x10 } RayType; #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif // ---------------------------------------------------------------------------- // Small vector class useful for multi-threaded environment. // // stack_container.h // // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This allocator can be used with STL containers to provide a stack buffer // from which to allocate memory and overflows onto the heap. 
This stack buffer // would be allocated on the stack and allows us to avoid heap operations in // some situations. // // STL likes to make copies of allocators, so the allocator itself can't hold // the data. Instead, we make the creator responsible for creating a // StackAllocator::Source which contains the data. Copying the allocator // merely copies the pointer to this shared source, so all allocators created // based on our allocator will share the same stack buffer. // // This stack buffer implementation is very simple. The first allocation that // fits in the stack buffer will use the stack buffer. Any subsequent // allocations will not use the stack buffer, even if there is unused room. // This makes it appropriate for array-like containers, but the caller should // be sure to reserve() in the container up to the stack buffer size. Otherwise // the container will allocate a small array which will "use up" the stack // buffer. template <typename T, size_t stack_capacity> class StackAllocator : public std::allocator<T> { public: typedef typename std::allocator<T>::pointer pointer; typedef typename std::allocator<T>::size_type size_type; // Backing store for the allocator. The container owner is responsible for // maintaining this for as long as any containers using this allocator are // live. struct Source { Source() : used_stack_buffer_(false) {} // Casts the buffer in its right type. T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); } const T *stack_buffer() const { return reinterpret_cast<const T *>(stack_buffer_); } // // IMPORTANT: Take care to ensure that stack_buffer_ is aligned // since it is used to mimic an array of T. // Be careful while declaring any unaligned types (like bool) // before stack_buffer_. // // The buffer itself. It is not of type T because we don't want the // constructors and destructors to be automatically called. Define a POD // buffer of the right size instead. char stack_buffer_[sizeof(T[stack_capacity])]; // Set when the stack buffer is used for an allocation. We do not track // how much of the buffer is used, only that somebody is using it. bool used_stack_buffer_; }; // Used by containers when they want to refer to an allocator of type U. template <typename U> struct rebind { typedef StackAllocator<U, stack_capacity> other; }; // For the straight up copy c-tor, we can share storage. StackAllocator(const StackAllocator<T, stack_capacity> &rhs) : source_(rhs.source_) {} // ISO C++ requires the following constructor to be defined, // and std::vector in VC++2008SP1 Release fails with an error // in the class _Container_base_aux_alloc_real (from <xutility>) // if the constructor does not exist. // For this constructor, we cannot share storage; there's // no guarantee that the Source buffer of Ts is large enough // for Us. // TODO(Google): If we were fancy pants, perhaps we could share storage // iff sizeof(T) == sizeof(U). template <typename U, size_t other_capacity> StackAllocator(const StackAllocator<U, other_capacity> &other) : source_(NULL) { (void)other; } explicit StackAllocator(Source *source) : source_(source) {} // Actually do the allocation. Use the stack buffer if nobody has used it yet // and the size requested fits. Otherwise, fall through to the standard // allocator. 
pointer allocate(size_type n, void *hint = 0) { if (source_ != NULL && !source_->used_stack_buffer_ && n <= stack_capacity) { source_->used_stack_buffer_ = true; return source_->stack_buffer(); } else { return std::allocator<T>::allocate(n, hint); } } // Free: when trying to free the stack buffer, just mark it as free. For // non-stack-buffer pointers, just fall though to the standard allocator. void deallocate(pointer p, size_type n) { if (source_ != NULL && p == source_->stack_buffer()) source_->used_stack_buffer_ = false; else std::allocator<T>::deallocate(p, n); } private: Source *source_; }; // A wrapper around STL containers that maintains a stack-sized buffer that the // initial capacity of the vector is based on. Growing the container beyond the // stack capacity will transparently overflow onto the heap. The container must // support reserve(). // // WATCH OUT: the ContainerType MUST use the proper StackAllocator for this // type. This object is really intended to be used only internally. You'll want // to use the wrappers below for different types. template <typename TContainerType, int stack_capacity> class StackContainer { public: typedef TContainerType ContainerType; typedef typename ContainerType::value_type ContainedType; typedef StackAllocator<ContainedType, stack_capacity> Allocator; // Allocator must be constructed before the container! StackContainer() : allocator_(&stack_data_), container_(allocator_) { // Make the container use the stack allocation by reserving our buffer size // before doing anything else. container_.reserve(stack_capacity); } // Getters for the actual container. // // Danger: any copies of this made using the copy constructor must have // shorter lifetimes than the source. The copy will share the same allocator // and therefore the same stack buffer as the original. Use std::copy to // copy into a "real" container for longer-lived objects. ContainerType &container() { return container_; } const ContainerType &container() const { return container_; } // Support operator-> to get to the container. This allows nicer syntax like: // StackContainer<...> foo; // std::sort(foo->begin(), foo->end()); ContainerType *operator->() { return &container_; } const ContainerType *operator->() const { return &container_; } #ifdef UNIT_TEST // Retrieves the stack source so that that unit tests can verify that the // buffer is being used properly. const typename Allocator::Source &stack_data() const { return stack_data_; } #endif protected: typename Allocator::Source stack_data_; unsigned char pad_[7]; Allocator allocator_; ContainerType container_; // DISALLOW_EVIL_CONSTRUCTORS(StackContainer); StackContainer(const StackContainer &); void operator=(const StackContainer &); }; // StackVector // // Example: // StackVector<int, 16> foo; // foo->push_back(22); // we have overloaded operator-> // foo[0] = 10; // as well as operator[] template <typename T, size_t stack_capacity> class StackVector : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity> { public: StackVector() : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity>() {} // We need to put this in STL containers sometimes, which requires a copy // constructor. We can't call the regular copy constructor because that will // take the stack buffer from the original. Here, we create an empty object // and make a stack buffer of its own. 
StackVector(const StackVector<T, stack_capacity> &other) : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >, stack_capacity>() { this->container().assign(other->begin(), other->end()); } StackVector<T, stack_capacity> &operator=( const StackVector<T, stack_capacity> &other) { this->container().assign(other->begin(), other->end()); return *this; } // Vectors are commonly indexed, which isn't very convenient even with // operator-> (using "->at()" does exception stuff we don't want). T &operator[](size_t i) { return this->container().operator[](i); } const T &operator[](size_t i) const { return this->container().operator[](i); } }; // ---------------------------------------------------------------------------- template <typename T = float> class real3 { public: real3() {} real3(T x) { v[0] = x; v[1] = x; v[2] = x; } real3(T xx, T yy, T zz) { v[0] = xx; v[1] = yy; v[2] = zz; } explicit real3(const T *p) { v[0] = p[0]; v[1] = p[1]; v[2] = p[2]; } inline T x() const { return v[0]; } inline T y() const { return v[1]; } inline T z() const { return v[2]; } real3 operator*(T f) const { return real3(x() * f, y() * f, z() * f); } real3 operator-(const real3 &f2) const { return real3(x() - f2.x(), y() - f2.y(), z() - f2.z()); } real3 operator*(const real3 &f2) const { return real3(x() * f2.x(), y() * f2.y(), z() * f2.z()); } real3 operator+(const real3 &f2) const { return real3(x() + f2.x(), y() + f2.y(), z() + f2.z()); } real3 &operator+=(const real3 &f2) { v[0] += f2.x(); v[1] += f2.y(); v[2] += f2.z(); return (*this); } real3 operator/(const real3 &f2) const { return real3(x() / f2.x(), y() / f2.y(), z() / f2.z()); } real3 operator-() const { return real3(-x(), -y(), -z()); } T operator[](int i) const { return v[i]; } T &operator[](int i) { return v[i]; } T v[3]; // T pad; // for alignment(when T = float) }; template <typename T> inline real3<T> operator*(T f, const real3<T> &v) { return real3<T>(v.x() * f, v.y() * f, v.z() * f); } template <typename T> inline real3<T> vneg(const real3<T> &rhs) { return real3<T>(-rhs.x(), -rhs.y(), -rhs.z()); } template <typename T> inline T vlength(const real3<T> &rhs) { return std::sqrt(rhs.x() * rhs.x() + rhs.y() * rhs.y() + rhs.z() * rhs.z()); } template <typename T> inline real3<T> vnormalize(const real3<T> &rhs) { real3<T> v = rhs; T len = vlength(rhs); if (std::fabs(len) > std::numeric_limits<T>::epsilon()) { T inv_len = static_cast<T>(1.0) / len; v.v[0] *= inv_len; v.v[1] *= inv_len; v.v[2] *= inv_len; } return v; } template <typename T> inline real3<T> vcross(const real3<T> a, const real3<T> b) { real3<T> c; c[0] = a[1] * b[2] - a[2] * b[1]; c[1] = a[2] * b[0] - a[0] * b[2]; c[2] = a[0] * b[1] - a[1] * b[0]; return c; } template <typename T> inline T vdot(const real3<T> a, const real3<T> b) { return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]; } template <typename T> inline real3<T> vsafe_inverse(const real3<T> v) { real3<T> r; #ifdef NANORT_USE_CPP11_FEATURE if (std::fabs(v[0]) < std::numeric_limits<T>::epsilon()) { r[0] = std::numeric_limits<T>::infinity() * std::copysign(static_cast<T>(1), v[0]); } else { r[0] = static_cast<T>(1.0) / v[0]; } if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) { r[1] = std::numeric_limits<T>::infinity() * std::copysign(static_cast<T>(1), v[1]); } else { r[1] = static_cast<T>(1.0) / v[1]; } if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) { r[2] = std::numeric_limits<T>::infinity() * std::copysign(static_cast<T>(1), v[2]); } else { r[2] = static_cast<T>(1.0) / v[2]; } #else if (std::fabs(v[0]) < 
std::numeric_limits<T>::epsilon()) { T sgn = (v[0] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1); r[0] = std::numeric_limits<T>::infinity() * sgn; } else { r[0] = static_cast<T>(1.0) / v[0]; } if (std::fabs(v[1]) < std::numeric_limits<T>::epsilon()) { T sgn = (v[1] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1); r[1] = std::numeric_limits<T>::infinity() * sgn; } else { r[1] = static_cast<T>(1.0) / v[1]; } if (std::fabs(v[2]) < std::numeric_limits<T>::epsilon()) { T sgn = (v[2] < static_cast<T>(0)) ? static_cast<T>(-1) : static_cast<T>(1); r[2] = std::numeric_limits<T>::infinity() * sgn; } else { r[2] = static_cast<T>(1.0) / v[2]; } #endif return r; } template <typename real> inline const real *get_vertex_addr(const real *p, const size_t idx, const size_t stride_bytes) { return reinterpret_cast<const real *>( reinterpret_cast<const unsigned char *>(p) + idx * stride_bytes); } template <typename T = float> class Ray { public: Ray() : min_t(static_cast<T>(0.0)), max_t(std::numeric_limits<T>::max()), type(RAY_TYPE_NONE) { org[0] = static_cast<T>(0.0); org[1] = static_cast<T>(0.0); org[2] = static_cast<T>(0.0); dir[0] = static_cast<T>(0.0); dir[1] = static_cast<T>(0.0); dir[2] = static_cast<T>(-1.0); } T org[3]; // must set T dir[3]; // must set T min_t; // minimum ray hit distance. T max_t; // maximum ray hit distance. T inv_dir[3]; // filled internally int dir_sign[3]; // filled internally unsigned int type; // ray type // TODO(LTE): Align sizeof(Ray) }; template <typename T = float> class BVHNode { public: BVHNode() {} BVHNode(const BVHNode &rhs) { bmin[0] = rhs.bmin[0]; bmin[1] = rhs.bmin[1]; bmin[2] = rhs.bmin[2]; flag = rhs.flag; bmax[0] = rhs.bmax[0]; bmax[1] = rhs.bmax[1]; bmax[2] = rhs.bmax[2]; axis = rhs.axis; data[0] = rhs.data[0]; data[1] = rhs.data[1]; } BVHNode &operator=(const BVHNode &rhs) { bmin[0] = rhs.bmin[0]; bmin[1] = rhs.bmin[1]; bmin[2] = rhs.bmin[2]; flag = rhs.flag; bmax[0] = rhs.bmax[0]; bmax[1] = rhs.bmax[1]; bmax[2] = rhs.bmax[2]; axis = rhs.axis; data[0] = rhs.data[0]; data[1] = rhs.data[1]; return (*this); } ~BVHNode() {} T bmin[3]; T bmax[3]; int flag; // 1 = leaf node, 0 = branch node int axis; // leaf // data[0] = npoints // data[1] = index // // branch // data[0] = child[0] // data[1] = child[1] unsigned int data[2]; }; template <class H> class IntersectComparator { public: bool operator()(const H &a, const H &b) const { return a.t < b.t; } }; /// BVH build option. template <typename T = float> struct BVHBuildOptions { T cost_t_aabb; unsigned int min_leaf_primitives; unsigned int max_tree_depth; unsigned int bin_size; unsigned int shallow_depth; unsigned int min_primitives_for_parallel_build; // Cache bounding box computation. // Requires more memory, but BVHbuild can be faster. bool cache_bbox; unsigned char pad[3]; // Set default value: Taabb = 0.2 BVHBuildOptions() : cost_t_aabb(static_cast<T>(0.2)), min_leaf_primitives(4), max_tree_depth(256), bin_size(64), shallow_depth(kNANORT_SHALLOW_DEPTH), min_primitives_for_parallel_build( kNANORT_MIN_PRIMITIVES_FOR_PARALLEL_BUILD), cache_bbox(false) {} }; /// BVH build statistics. class BVHBuildStatistics { public: unsigned int max_tree_depth; unsigned int num_leaf_nodes; unsigned int num_branch_nodes; float build_secs; // Set default value: Taabb = 0.2 BVHBuildStatistics() : max_tree_depth(0), num_leaf_nodes(0), num_branch_nodes(0), build_secs(0.0f) {} }; /// BVH trace option. class BVHTraceOptions { public: // Hit only for face IDs in indexRange. 
// This feature is good to mimic something like glDrawArrays() unsigned int prim_ids_range[2]; // Prim ID to skip for avoiding self-intersection // -1 = no skipping unsigned int skip_prim_id; bool cull_back_face; unsigned char pad[3]; ///< Padding(not used) BVHTraceOptions() { prim_ids_range[0] = 0; prim_ids_range[1] = 0x7FFFFFFF; // Up to 2G face IDs. skip_prim_id = static_cast<unsigned int>(-1); cull_back_face = false; } }; template <typename T> class BBox { public: real3<T> bmin; real3<T> bmax; BBox() { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); } }; template <typename T> class NodeHit { public: NodeHit() : t_min(std::numeric_limits<T>::max()), t_max(-std::numeric_limits<T>::max()), node_id(static_cast<unsigned int>(-1)) {} NodeHit(const NodeHit<T> &rhs) { t_min = rhs.t_min; t_max = rhs.t_max; node_id = rhs.node_id; } NodeHit &operator=(const NodeHit<T> &rhs) { t_min = rhs.t_min; t_max = rhs.t_max; node_id = rhs.node_id; return (*this); } ~NodeHit() {} T t_min; T t_max; unsigned int node_id; }; template <typename T> class NodeHitComparator { public: inline bool operator()(const NodeHit<T> &a, const NodeHit<T> &b) { return a.t_min < b.t_min; } }; template <typename T> class BVHAccel { public: BVHAccel() : pad0_(0) { (void)pad0_; } ~BVHAccel() {} /// /// Build BVH for input primitives. /// template <class P, class Pred> bool Build(const unsigned int num_primitives, const P &p, const Pred &pred, const BVHBuildOptions<T> &options = BVHBuildOptions<T>()); /// /// Get statistics of built BVH tree. Valid after Build() /// BVHBuildStatistics GetStatistics() const { return stats_; } #if defined(NANORT_ENABLE_SERIALIZATION) /// /// Dump built BVH to the file. /// bool Dump(const char *filename) const; bool Dump(FILE *fp) const; /// /// Load BVH binary /// bool Load(const char *filename); bool Load(FILE *fp); #endif void Debug(); /// /// Traverse into BVH along ray and find closest hit point & primitive if /// found /// template <class I, class H> bool Traverse(const Ray<T> &ray, const I &intersector, H *isect, const BVHTraceOptions &options = BVHTraceOptions()) const; #if 0 /// Multi-hit ray traversal /// Returns `max_intersections` frontmost intersections template<class I, class H, class Comp> bool MultiHitTraverse(const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<H, 128> *isects, const BVHTraceOptions &options = BVHTraceOptions()) const; #endif /// /// List up nodes which intersects along the ray. /// This function is useful for two-level BVH traversal. /// template <class I> bool ListNodeIntersections(const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<NodeHit<T>, 128> *hits) const; const std::vector<BVHNode<T> > &GetNodes() const { return nodes_; } const std::vector<unsigned int> &GetIndices() const { return indices_; } /// /// Returns bounding box of built BVH. 
/// void BoundingBox(T bmin[3], T bmax[3]) const { if (nodes_.empty()) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); } else { bmin[0] = nodes_[0].bmin[0]; bmin[1] = nodes_[0].bmin[1]; bmin[2] = nodes_[0].bmin[2]; bmax[0] = nodes_[0].bmax[0]; bmax[1] = nodes_[0].bmax[1]; bmax[2] = nodes_[0].bmax[2]; } } bool IsValid() const { return nodes_.size() > 0; } private: #if defined(NANORT_ENABLE_PARALLEL_BUILD) typedef struct { unsigned int left_idx; unsigned int right_idx; unsigned int offset; } ShallowNodeInfo; // Used only during BVH construction std::vector<ShallowNodeInfo> shallow_node_infos_; /// Builds shallow BVH tree recursively. template <class P, class Pred> unsigned int BuildShallowTree(std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, unsigned int max_shallow_depth, const P &p, const Pred &pred); #endif /// Builds BVH tree recursively. template <class P, class Pred> unsigned int BuildTree(BVHBuildStatistics *out_stat, std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, const P &p, const Pred &pred); template <class I> bool TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const; template <class I> bool TestLeafNodeIntersections( const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections, const I &intersector, std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > *isect_pq) const; #if 0 template<class I, class H, class Comp> bool MultiHitTestLeafNode(std::priority_queue<H, std::vector<H>, Comp> *isect_pq, int max_intersections, const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const; #endif std::vector<BVHNode<T> > nodes_; std::vector<unsigned int> indices_; // max 4G triangles. std::vector<BBox<T> > bboxes_; BVHBuildOptions<T> options_; BVHBuildStatistics stats_; unsigned int pad0_; }; // Predefined SAH predicator for triangle. template <typename T = float> class TriangleSAHPred { public: TriangleSAHPred( const T *vertices, const unsigned int *faces, size_t vertex_stride_bytes) // e.g. 12 for sizeof(float) * XYZ : axis_(0), pos_(static_cast<T>(0.0)), vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} TriangleSAHPred(const TriangleSAHPred<T> &rhs) : axis_(rhs.axis_), pos_(rhs.pos_), vertices_(rhs.vertices_), faces_(rhs.faces_), vertex_stride_bytes_(rhs.vertex_stride_bytes_) {} TriangleSAHPred<T> &operator=(const TriangleSAHPred<T> &rhs) { axis_ = rhs.axis_; pos_ = rhs.pos_; vertices_ = rhs.vertices_; faces_ = rhs.faces_; vertex_stride_bytes_ = rhs.vertex_stride_bytes_; return (*this); } void Set(int axis, T pos) const { axis_ = axis; pos_ = pos; } bool operator()(unsigned int i) const { int axis = axis_; T pos = pos_; unsigned int i0 = faces_[3 * i + 0]; unsigned int i1 = faces_[3 * i + 1]; unsigned int i2 = faces_[3 * i + 2]; real3<T> p0(get_vertex_addr<T>(vertices_, i0, vertex_stride_bytes_)); real3<T> p1(get_vertex_addr<T>(vertices_, i1, vertex_stride_bytes_)); real3<T> p2(get_vertex_addr<T>(vertices_, i2, vertex_stride_bytes_)); T center = p0[axis] + p1[axis] + p2[axis]; return (center < pos * static_cast<T>(3.0)); } private: mutable int axis_; mutable T pos_; const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; }; // Predefined Triangle mesh geometry. 
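// Illustrative note on the predefined triangle helpers that follow: the BVH
// itself is geometry-agnostic. BVHAccel<T>::Build() only needs a primitive
// class P exposing BoundingBox(bmin, bmax, prim_index) and a predicate class
// Pred exposing Set(axis, pos) / operator()(prim_index) for std::partition,
// and BVHAccel<T>::Traverse() only needs an intersector I exposing
// PrepareTraversal(), Intersect(), Update(), GetT() and PostTraversal().
// Note that TriangleSAHPred above classifies a triangle by its unnormalized
// centroid: it compares p0[axis] + p1[axis] + p2[axis] against 3 * pos,
// which avoids a division by three per primitive.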
template <typename T = float> class TriangleMesh { public: TriangleMesh( const T *vertices, const unsigned int *faces, const size_t vertex_stride_bytes) // e.g. 12 for sizeof(float) * XYZ : vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} /// Compute bounding box for `prim_index`th triangle. /// This function is called for each primitive in BVH build. void BoundingBox(real3<T> *bmin, real3<T> *bmax, unsigned int prim_index) const { (*bmin)[0] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[0]; (*bmin)[1] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[1]; (*bmin)[2] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[2]; (*bmax)[0] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[0]; (*bmax)[1] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[1]; (*bmax)[2] = get_vertex_addr(vertices_, faces_[3 * prim_index + 0], vertex_stride_bytes_)[2]; for (unsigned int i = 1; i < 3; i++) { for (unsigned int k = 0; k < 3; k++) { if ((*bmin)[static_cast<int>(k)] > get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]) { (*bmin)[static_cast<int>(k)] = get_vertex_addr<T>( vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]; } if ((*bmax)[static_cast<int>(k)] < get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]) { (*bmax)[static_cast<int>(k)] = get_vertex_addr<T>( vertices_, faces_[3 * prim_index + i], vertex_stride_bytes_)[k]; } } } } const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; }; template <typename T = float> class TriangleIntersection { public: T u; T v; // Required member variables. T t; unsigned int prim_id; }; template <typename T = float, class H = TriangleIntersection<T> > class TriangleIntersector { public: TriangleIntersector(const T *vertices, const unsigned int *faces, const size_t vertex_stride_bytes) // e.g. // vertex_stride_bytes // = 12 = sizeof(float) // * 3 : vertices_(vertices), faces_(faces), vertex_stride_bytes_(vertex_stride_bytes) {} // For Watertight Ray/Triangle Intersection. typedef struct { T Sx; T Sy; T Sz; int kx; int ky; int kz; } RayCoeff; /// Do ray interesection stuff for `prim_index` th primitive and return hit /// distance `t`, /// varycentric coordinate `u` and `v`. /// Returns true if there's intersection. bool Intersect(T *t_inout, const unsigned int prim_index) const { if ((prim_index < trace_options_.prim_ids_range[0]) || (prim_index >= trace_options_.prim_ids_range[1])) { return false; } // Self-intersection test. 
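    // Note: skip_prim_id is an ID-based alternative to offsetting the ray
    // origin by an epsilon. When the caller sets
    // BVHTraceOptions::skip_prim_id to the primitive the ray was spawned
    // from, that primitive is rejected here, so secondary rays cannot
    // re-hit their own source triangle at t ~= 0.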
if (prim_index == trace_options_.skip_prim_id) { return false; } const unsigned int f0 = faces_[3 * prim_index + 0]; const unsigned int f1 = faces_[3 * prim_index + 1]; const unsigned int f2 = faces_[3 * prim_index + 2]; const real3<T> p0(get_vertex_addr(vertices_, f0 + 0, vertex_stride_bytes_)); const real3<T> p1(get_vertex_addr(vertices_, f1 + 0, vertex_stride_bytes_)); const real3<T> p2(get_vertex_addr(vertices_, f2 + 0, vertex_stride_bytes_)); const real3<T> A = p0 - ray_org_; const real3<T> B = p1 - ray_org_; const real3<T> C = p2 - ray_org_; const T Ax = A[ray_coeff_.kx] - ray_coeff_.Sx * A[ray_coeff_.kz]; const T Ay = A[ray_coeff_.ky] - ray_coeff_.Sy * A[ray_coeff_.kz]; const T Bx = B[ray_coeff_.kx] - ray_coeff_.Sx * B[ray_coeff_.kz]; const T By = B[ray_coeff_.ky] - ray_coeff_.Sy * B[ray_coeff_.kz]; const T Cx = C[ray_coeff_.kx] - ray_coeff_.Sx * C[ray_coeff_.kz]; const T Cy = C[ray_coeff_.ky] - ray_coeff_.Sy * C[ray_coeff_.kz]; T U = Cx * By - Cy * Bx; T V = Ax * Cy - Ay * Cx; T W = Bx * Ay - By * Ax; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wfloat-equal" #endif // Fall back to test against edges using double precision. if (U == static_cast<T>(0.0) || V == static_cast<T>(0.0) || W == static_cast<T>(0.0)) { double CxBy = static_cast<double>(Cx) * static_cast<double>(By); double CyBx = static_cast<double>(Cy) * static_cast<double>(Bx); U = static_cast<T>(CxBy - CyBx); double AxCy = static_cast<double>(Ax) * static_cast<double>(Cy); double AyCx = static_cast<double>(Ay) * static_cast<double>(Cx); V = static_cast<T>(AxCy - AyCx); double BxAy = static_cast<double>(Bx) * static_cast<double>(Ay); double ByAx = static_cast<double>(By) * static_cast<double>(Ax); W = static_cast<T>(BxAy - ByAx); } if (trace_options_.cull_back_face) { if (U < static_cast<T>(0.0) || V < static_cast<T>(0.0) || W < static_cast<T>(0.0)) return false; } else { if ((U < static_cast<T>(0.0) || V < static_cast<T>(0.0) || W < static_cast<T>(0.0)) && (U > static_cast<T>(0.0) || V > static_cast<T>(0.0) || W > static_cast<T>(0.0))) { return false; } } T det = U + V + W; if (det == static_cast<T>(0.0)) return false; #ifdef __clang__ #pragma clang diagnostic pop #endif const T Az = ray_coeff_.Sz * A[ray_coeff_.kz]; const T Bz = ray_coeff_.Sz * B[ray_coeff_.kz]; const T Cz = ray_coeff_.Sz * C[ray_coeff_.kz]; const T D = U * Az + V * Bz + W * Cz; const T rcpDet = static_cast<T>(1.0) / det; T tt = D * rcpDet; if (tt > (*t_inout)) { return false; } if (tt < t_min_) { return false; } (*t_inout) = tt; // Use Thomas-Mueller style barycentric coord. // U + V + W = 1.0 and interp(p) = U * p0 + V * p1 + W * p2 // We want interp(p) = (1 - u - v) * p0 + u * v1 + v * p2; // => u = V, v = W. u_ = V * rcpDet; v_ = W * rcpDet; return true; } /// Returns the nearest hit distance. T GetT() const { return t_; } /// Update is called when initializing intesection and nearest hit is found. void Update(T t, unsigned int prim_idx) const { t_ = t; prim_id_ = prim_idx; } /// Prepare BVH traversal(e.g. compute inverse ray direction) /// This function is called only once in BVH traversal. void PrepareTraversal(const Ray<T> &ray, const BVHTraceOptions &trace_options) const { ray_org_[0] = ray.org[0]; ray_org_[1] = ray.org[1]; ray_org_[2] = ray.org[2]; // Calculate dimension where the ray direction is maximal. 
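    // The code below follows the watertight ray/triangle setup: kz is the
    // axis where |dir| is largest, kx/ky are the remaining two axes (swapped
    // when dir[kz] < 0 to preserve triangle winding), and Sx, Sy, Sz are the
    // shear/scale constants applied to the translated vertices so that the
    // ray effectively points along +z.
    //
    // Illustrative values (not from the source): dir = (0.2, 0.3, 0.9)
    //   kz = 2, kx = 0, ky = 1 (no swap since dir[kz] > 0)
    //   Sx = 0.2 / 0.9 ~= 0.222, Sy = 0.3 / 0.9 ~= 0.333, Sz = 1 / 0.9 ~= 1.111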
ray_coeff_.kz = 0; T absDir = std::fabs(ray.dir[0]); if (absDir < std::fabs(ray.dir[1])) { ray_coeff_.kz = 1; absDir = std::fabs(ray.dir[1]); } if (absDir < std::fabs(ray.dir[2])) { ray_coeff_.kz = 2; absDir = std::fabs(ray.dir[2]); } ray_coeff_.kx = ray_coeff_.kz + 1; if (ray_coeff_.kx == 3) ray_coeff_.kx = 0; ray_coeff_.ky = ray_coeff_.kx + 1; if (ray_coeff_.ky == 3) ray_coeff_.ky = 0; // Swap kx and ky dimension to preserve widing direction of triangles. if (ray.dir[ray_coeff_.kz] < static_cast<T>(0.0)) std::swap(ray_coeff_.kx, ray_coeff_.ky); // Calculate shear constants. ray_coeff_.Sx = ray.dir[ray_coeff_.kx] / ray.dir[ray_coeff_.kz]; ray_coeff_.Sy = ray.dir[ray_coeff_.ky] / ray.dir[ray_coeff_.kz]; ray_coeff_.Sz = static_cast<T>(1.0) / ray.dir[ray_coeff_.kz]; trace_options_ = trace_options; t_min_ = ray.min_t; u_ = static_cast<T>(0.0); v_ = static_cast<T>(0.0); } /// Post BVH traversal stuff. /// Fill `isect` if there is a hit. void PostTraversal(const Ray<T> &ray, bool hit, H *isect) const { if (hit && isect) { (*isect).t = t_; (*isect).u = u_; (*isect).v = v_; (*isect).prim_id = prim_id_; } (void)ray; } private: const T *vertices_; const unsigned int *faces_; const size_t vertex_stride_bytes_; mutable real3<T> ray_org_; mutable RayCoeff ray_coeff_; mutable BVHTraceOptions trace_options_; mutable T t_min_; mutable T t_; mutable T u_; mutable T v_; mutable unsigned int prim_id_; }; // // Robust BVH Ray Traversal : http://jcgt.org/published/0002/02/02/paper.pdf // // NaN-safe min and max function. template <class T> const T &safemin(const T &a, const T &b) { return (a < b) ? a : b; } template <class T> const T &safemax(const T &a, const T &b) { return (a > b) ? a : b; } // // SAH functions // struct BinBuffer { explicit BinBuffer(unsigned int size) { bin_size = size; bin.resize(2 * 3 * size); clear(); } void clear() { memset(&bin[0], 0, sizeof(size_t) * 2 * 3 * bin_size); } std::vector<size_t> bin; // (min, max) * xyz * binsize unsigned int bin_size; unsigned int pad0; }; template <typename T> inline T CalculateSurfaceArea(const real3<T> &min, const real3<T> &max) { real3<T> box = max - min; return static_cast<T>(2.0) * (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]); } template <typename T> inline void GetBoundingBoxOfTriangle(real3<T> *bmin, real3<T> *bmax, const T *vertices, const unsigned int *faces, unsigned int index) { unsigned int f0 = faces[3 * index + 0]; unsigned int f1 = faces[3 * index + 1]; unsigned int f2 = faces[3 * index + 2]; real3<T> p[3]; p[0] = real3<T>(&vertices[3 * f0]); p[1] = real3<T>(&vertices[3 * f1]); p[2] = real3<T>(&vertices[3 * f2]); (*bmin) = p[0]; (*bmax) = p[0]; for (int i = 1; i < 3; i++) { (*bmin)[0] = std::min((*bmin)[0], p[i][0]); (*bmin)[1] = std::min((*bmin)[1], p[i][1]); (*bmin)[2] = std::min((*bmin)[2], p[i][2]); (*bmax)[0] = std::max((*bmax)[0], p[i][0]); (*bmax)[1] = std::max((*bmax)[1], p[i][1]); (*bmax)[2] = std::max((*bmax)[2], p[i][2]); } } template <typename T, class P> inline void ContributeBinBuffer(BinBuffer *bins, // [out] const real3<T> &scene_min, const real3<T> &scene_max, unsigned int *indices, unsigned int left_idx, unsigned int right_idx, const P &p) { T bin_size = static_cast<T>(bins->bin_size); // Calculate extent real3<T> scene_size, scene_inv_size; scene_size = scene_max - scene_min; for (int i = 0; i < 3; ++i) { assert(scene_size[i] >= static_cast<T>(0.0)); if (scene_size[i] > static_cast<T>(0.0)) { scene_inv_size[i] = bin_size / scene_size[i]; } else { scene_inv_size[i] = static_cast<T>(0.0); } } // Clear bin data 
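  // How the bins are filled: each primitive AABB is quantized into bin
  // coordinates q = (c - scene_min) * (bin_size / scene_size), clamped to
  // [0, bin_size - 1]. The bin of the AABB's min corner increments the
  // "min" histogram and the bin of its max corner increments the "max"
  // histogram; FindCutFromBinBuffer() later sweeps these to obtain the
  // left/right primitive counts for each candidate split plane.
  //
  // Illustrative values (not from the source): a scene x-extent of [0, 10]
  // with bin_size = 64 gives scene_inv_size.x = 6.4, so a min corner at
  // x = 2.5 lands in bin (int)(2.5 * 6.4) = 16.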
std::fill(bins->bin.begin(), bins->bin.end(), 0); // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->bin_size)); size_t idx_bmin[3]; size_t idx_bmax[3]; for (size_t i = left_idx; i < right_idx; i++) { // // Quantize the position into [0, BIN_SIZE) // // q[i] = (int)(p[i] - scene_bmin) / scene_size // real3<T> bmin; real3<T> bmax; p.BoundingBox(&bmin, &bmax, indices[i]); // GetBoundingBoxOfTriangle(&bmin, &bmax, vertices, faces, indices[i]); real3<T> quantized_bmin = (bmin - scene_min) * scene_inv_size; real3<T> quantized_bmax = (bmax - scene_min) * scene_inv_size; // idx is now in [0, BIN_SIZE) for (int j = 0; j < 3; ++j) { int q0 = static_cast<int>(quantized_bmin[j]); if (q0 < 0) q0 = 0; int q1 = static_cast<int>(quantized_bmax[j]); if (q1 < 0) q1 = 0; idx_bmin[j] = static_cast<unsigned int>(q0); idx_bmax[j] = static_cast<unsigned int>(q1); if (idx_bmin[j] >= bin_size) idx_bmin[j] = static_cast<unsigned int>(bin_size) - 1; if (idx_bmax[j] >= bin_size) idx_bmax[j] = static_cast<unsigned int>(bin_size) - 1; assert(idx_bmin[j] < bin_size); assert(idx_bmax[j] < bin_size); // Increment bin counter bins->bin[0 * (bins->bin_size * 3) + static_cast<size_t>(j) * bins->bin_size + idx_bmin[j]] += 1; bins->bin[1 * (bins->bin_size * 3) + static_cast<size_t>(j) * bins->bin_size + idx_bmax[j]] += 1; } } } template <typename T> inline T SAH(size_t ns1, T leftArea, size_t ns2, T rightArea, T invS, T Taabb, T Ttri) { T sah; sah = static_cast<T>(2.0) * Taabb + (leftArea * invS) * static_cast<T>(ns1) * Ttri + (rightArea * invS) * static_cast<T>(ns2) * Ttri; return sah; } template <typename T> inline bool FindCutFromBinBuffer(T *cut_pos, // [out] xyz int *minCostAxis, // [out] const BinBuffer *bins, const real3<T> &bmin, const real3<T> &bmax, size_t num_primitives, T costTaabb) { // should be in [0.0, 1.0] const T kEPS = std::numeric_limits<T>::epsilon(); // * epsScale; size_t left, right; real3<T> bsize, bstep; real3<T> bminLeft, bmaxLeft; real3<T> bminRight, bmaxRight; T saLeft, saRight, saTotal; T pos; T minCost[3]; T costTtri = static_cast<T>(1.0) - costTaabb; (*minCostAxis) = 0; bsize = bmax - bmin; bstep = bsize * (static_cast<T>(1.0) / bins->bin_size); saTotal = CalculateSurfaceArea(bmin, bmax); T invSaTotal = static_cast<T>(0.0); if (saTotal > kEPS) { invSaTotal = static_cast<T>(1.0) / saTotal; } for (int j = 0; j < 3; ++j) { // // Compute SAH cost for the right side of each cell of the bbox. // Exclude both extreme side of the bbox. // // i: 0 1 2 3 // +----+----+----+----+----+ // | | | | | | // +----+----+----+----+----+ // T minCostPos = bmin[j] + static_cast<T>(1.0) * bstep[j]; minCost[j] = std::numeric_limits<T>::max(); left = 0; right = num_primitives; bminLeft = bminRight = bmin; bmaxLeft = bmaxRight = bmax; for (int i = 0; i < static_cast<int>(bins->bin_size) - 1; ++i) { left += bins->bin[0 * (3 * bins->bin_size) + static_cast<size_t>(j) * bins->bin_size + static_cast<size_t>(i)]; right -= bins->bin[1 * (3 * bins->bin_size) + static_cast<size_t>(j) * bins->bin_size + static_cast<size_t>(i)]; assert(left <= num_primitives); assert(right <= num_primitives); // // Split pos bmin + (i + 1) * (bsize / BIN_SIZE) // +1 for i since we want a position on right side of the cell. 
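      //
      // SAH cost evaluated at this plane (cf. SAH() above):
      //   cost = 2 * Taabb + (SA_left / SA_total)  * N_left  * Ttri
      //                    + (SA_right / SA_total) * N_right * Ttri
      // where N_left/N_right come from the accumulated min/max bin counters
      // and SA_left/SA_right are the surface areas of the two clipped boxes.
      // The cheapest position per axis is kept in minCost[j]/cut_pos[j], and
      // the cheapest of the three axes is returned through *minCostAxis.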
// pos = bmin[j] + (i + static_cast<T>(1.0)) * bstep[j]; bmaxLeft[j] = pos; bminRight[j] = pos; saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft); saRight = CalculateSurfaceArea(bminRight, bmaxRight); T cost = SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri); if (cost < minCost[j]) { // // Update the min cost // minCost[j] = cost; minCostPos = pos; // minCostAxis = j; } } cut_pos[j] = minCostPos; } // cut_axis = minCostAxis; // cut_pos = minCostPos; // Find min cost axis T cost = minCost[0]; (*minCostAxis) = 0; if (cost > minCost[1]) { (*minCostAxis) = 1; cost = minCost[1]; } if (cost > minCost[2]) { (*minCostAxis) = 2; cost = minCost[2]; } return true; } #ifdef _OPENMP template <typename T, class P> void ComputeBoundingBoxOMP(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { { p.BoundingBox(bmin, bmax, indices[left_index]); } T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]}; T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]}; unsigned int n = right_index - left_index; #pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128)) { #pragma omp parallel for for (int i = int(left_index); i < int(right_index); i++) { // for each faces unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); for (int k = 0; k < 3; k++) { // xyz if ((*bmin)[k] > bbox_min[k]) (*bmin)[k] = bbox_min[k]; if ((*bmax)[k] < bbox_max[k]) (*bmax)[k] = bbox_max[k]; } } #pragma omp critical { for (int k = 0; k < 3; k++) { if (local_bmin[k] < (*bmin)[k]) { { if (local_bmin[k] < (*bmin)[k]) (*bmin)[k] = local_bmin[k]; } } if (local_bmax[k] > (*bmax)[k]) { { if (local_bmax[k] > (*bmax)[k]) (*bmax)[k] = local_bmax[k]; } } } } } } #endif #ifdef NANORT_USE_CPP11_FEATURE template <typename T, class P> inline void ComputeBoundingBoxThreaded(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { unsigned int n = right_index - left_index; size_t num_threads = std::min( size_t(kNANORT_MAX_THREADS), std::max(size_t(1), size_t(std::thread::hardware_concurrency()))); if (n < num_threads) { num_threads = n; } std::vector<std::thread> workers; size_t ndiv = n / num_threads; std::vector<T> local_bmins(3 * num_threads); // 3 = xyz std::vector<T> local_bmaxs(3 * num_threads); // 3 = xyz for (size_t t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&, t]() { size_t si = left_index + t * ndiv; size_t ei = std::min(left_index + (t + 1) * ndiv, size_t(right_index)); local_bmins[3 * t + 0] = std::numeric_limits<T>::infinity(); local_bmins[3 * t + 1] = std::numeric_limits<T>::infinity(); local_bmins[3 * t + 2] = std::numeric_limits<T>::infinity(); local_bmaxs[3 * t + 0] = -std::numeric_limits<T>::infinity(); local_bmaxs[3 * t + 1] = -std::numeric_limits<T>::infinity(); local_bmaxs[3 * t + 2] = -std::numeric_limits<T>::infinity(); for (size_t i = si; i < ei; i++) { // for each faces unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); for (size_t k = 0; k < 3; k++) { // xyz if (local_bmins[3 * t + k] > bbox_min[int(k)]) local_bmins[3 * t + k] = bbox_min[int(k)]; if (local_bmaxs[3 * t + k] < bbox_max[int(k)]) local_bmaxs[3 * t + k] = bbox_max[int(k)]; } } })); } for (auto &t : workers) { t.join(); } // merge bbox for (size_t k = 0; k < 3; k++) { (*bmin)[int(k)] = local_bmins[k]; (*bmax)[int(k)] = local_bmaxs[k]; } for (size_t t = 1; t < num_threads; 
t++) { for (size_t k = 0; k < 3; k++) { if (local_bmins[3 * t + k] < (*bmin)[int(k)]) { (*bmin)[int(k)] = local_bmins[3 * t + k]; } if (local_bmaxs[3 * t + k] > (*bmax)[int(k)]) { (*bmax)[int(k)] = local_bmaxs[3 * t + k]; } } } } #endif template <typename T, class P> inline void ComputeBoundingBox(real3<T> *bmin, real3<T> *bmax, const unsigned int *indices, unsigned int left_index, unsigned int right_index, const P &p) { { unsigned int idx = indices[left_index]; p.BoundingBox(bmin, bmax, idx); } { for (unsigned int i = left_index + 1; i < right_index; i++) { // for each primitives unsigned int idx = indices[i]; real3<T> bbox_min, bbox_max; p.BoundingBox(&bbox_min, &bbox_max, idx); for (int k = 0; k < 3; k++) { // xyz if ((*bmin)[k] > bbox_min[k]) (*bmin)[k] = bbox_min[k]; if ((*bmax)[k] < bbox_max[k]) (*bmax)[k] = bbox_max[k]; } } } } template <typename T> inline void GetBoundingBox(real3<T> *bmin, real3<T> *bmax, const std::vector<BBox<T> > &bboxes, unsigned int *indices, unsigned int left_index, unsigned int right_index) { { unsigned int i = left_index; unsigned int idx = indices[i]; (*bmin)[0] = bboxes[idx].bmin[0]; (*bmin)[1] = bboxes[idx].bmin[1]; (*bmin)[2] = bboxes[idx].bmin[2]; (*bmax)[0] = bboxes[idx].bmax[0]; (*bmax)[1] = bboxes[idx].bmax[1]; (*bmax)[2] = bboxes[idx].bmax[2]; } T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]}; T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]}; { for (unsigned int i = left_index; i < right_index; i++) { // for each faces unsigned int idx = indices[i]; for (int k = 0; k < 3; k++) { // xyz T minval = bboxes[idx].bmin[k]; T maxval = bboxes[idx].bmax[k]; if (local_bmin[k] > minval) local_bmin[k] = minval; if (local_bmax[k] < maxval) local_bmax[k] = maxval; } } for (int k = 0; k < 3; k++) { (*bmin)[k] = local_bmin[k]; (*bmax)[k] = local_bmax[k]; } } } // // -- // #if defined(NANORT_ENABLE_PARALLEL_BUILD) template <typename T> template <class P, class Pred> unsigned int BVHAccel<T>::BuildShallowTree(std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, unsigned int max_shallow_depth, const P &p, const Pred &pred) { assert(left_idx <= right_idx); unsigned int offset = static_cast<unsigned int>(out_nodes->size()); if (stats_.max_tree_depth < depth) { stats_.max_tree_depth = depth; } real3<T> bmin, bmax; #if defined(NANORT_USE_CPP11_FEATURE) && defined(NANORT_ENABLE_PARALLEL_BUILD) ComputeBoundingBoxThreaded(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p); #else ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p); #endif unsigned int n = right_idx - left_idx; if ((n <= options_.min_leaf_primitives) || (depth >= options_.max_tree_depth)) { // Create leaf node. BVHNode<T> leaf; leaf.bmin[0] = bmin[0]; leaf.bmin[1] = bmin[1]; leaf.bmin[2] = bmin[2]; leaf.bmax[0] = bmax[0]; leaf.bmax[1] = bmax[1]; leaf.bmax[2] = bmax[2]; assert(left_idx < std::numeric_limits<unsigned int>::max()); leaf.flag = 1; // leaf leaf.data[0] = n; leaf.data[1] = left_idx; out_nodes->push_back(leaf); // atomic update stats_.num_leaf_nodes++; return offset; } // // Create branch node. // if (depth >= max_shallow_depth) { // Delay to build tree ShallowNodeInfo info; info.left_idx = left_idx; info.right_idx = right_idx; info.offset = offset; shallow_node_infos_.push_back(info); // Add dummy node. 
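      // The dummy node only reserves slot `offset` in out_nodes; its child
      // indices are meaningless at this point. After the per-subtree builds
      // have run in parallel, Build() overwrites this slot with the local
      // root of the corresponding subtree and appends the remaining local
      // nodes, shifting their child indices accordingly.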
BVHNode<T> node; node.axis = -1; node.flag = -1; out_nodes->push_back(node); return offset; } else { // // TODO(LTE): multi-threaded SAH computation, or use simple object median or // spacial median for shallow tree to speeding up the parallel build. // // // Compute SAH and find best split axis and position // int min_cut_axis = 0; T cut_pos[3] = {0.0, 0.0, 0.0}; BinBuffer bins(options_.bin_size); ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx, p); FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n, options_.cost_t_aabb); // Try all 3 axis until good cut position avaiable. unsigned int mid_idx = left_idx; int cut_axis = min_cut_axis; for (int axis_try = 0; axis_try < 3; axis_try++) { unsigned int *begin = &indices_[left_idx]; unsigned int *end = &indices_[right_idx - 1] + 1; // mimics end() iterator. unsigned int *mid = 0; // try min_cut_axis first. cut_axis = (min_cut_axis + axis_try) % 3; // @fixme { We want something like: std::partition(begin, end, // pred(cut_axis, cut_pos[cut_axis])); } pred.Set(cut_axis, cut_pos[cut_axis]); // // Split at (cut_axis, cut_pos) // indices_ will be modified. // mid = std::partition(begin, end, pred); mid_idx = left_idx + static_cast<unsigned int>((mid - begin)); if ((mid_idx == left_idx) || (mid_idx == right_idx)) { // Can't split well. // Switch to object median(which may create unoptimized tree, but // stable) mid_idx = left_idx + (n >> 1); // Try another axis if there's axis to try. } else { // Found good cut. exit loop. break; } } BVHNode<T> node; node.axis = cut_axis; node.flag = 0; // 0 = branch out_nodes->push_back(node); unsigned int left_child_index = 0; unsigned int right_child_index = 0; left_child_index = BuildShallowTree(out_nodes, left_idx, mid_idx, depth + 1, max_shallow_depth, p, pred); right_child_index = BuildShallowTree(out_nodes, mid_idx, right_idx, depth + 1, max_shallow_depth, p, pred); (*out_nodes)[offset].data[0] = left_child_index; (*out_nodes)[offset].data[1] = right_child_index; (*out_nodes)[offset].bmin[0] = bmin[0]; (*out_nodes)[offset].bmin[1] = bmin[1]; (*out_nodes)[offset].bmin[2] = bmin[2]; (*out_nodes)[offset].bmax[0] = bmax[0]; (*out_nodes)[offset].bmax[1] = bmax[1]; (*out_nodes)[offset].bmax[2] = bmax[2]; } stats_.num_branch_nodes++; return offset; } #endif template <typename T> template <class P, class Pred> unsigned int BVHAccel<T>::BuildTree(BVHBuildStatistics *out_stat, std::vector<BVHNode<T> > *out_nodes, unsigned int left_idx, unsigned int right_idx, unsigned int depth, const P &p, const Pred &pred) { assert(left_idx <= right_idx); unsigned int offset = static_cast<unsigned int>(out_nodes->size()); if (out_stat->max_tree_depth < depth) { out_stat->max_tree_depth = depth; } real3<T> bmin, bmax; if (!bboxes_.empty()) { GetBoundingBox(&bmin, &bmax, bboxes_, &indices_.at(0), left_idx, right_idx); } else { ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p); } unsigned int n = right_idx - left_idx; if ((n <= options_.min_leaf_primitives) || (depth >= options_.max_tree_depth)) { // Create leaf node. BVHNode<T> leaf; leaf.bmin[0] = bmin[0]; leaf.bmin[1] = bmin[1]; leaf.bmin[2] = bmin[2]; leaf.bmax[0] = bmax[0]; leaf.bmax[1] = bmax[1]; leaf.bmax[2] = bmax[2]; assert(left_idx < std::numeric_limits<unsigned int>::max()); leaf.flag = 1; // leaf leaf.data[0] = n; leaf.data[1] = left_idx; out_nodes->push_back(leaf); // atomic update out_stat->num_leaf_nodes++; return offset; } // // Create branch node. 
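  //
  // Split strategy used below: compute a binned-SAH cut per axis, then
  // std::partition the index range with the predicate on the cheapest axis.
  // If the partition is degenerate (every primitive falls on one side), the
  // next axis is tried; if all three fail, the node falls back to an object
  // median split (mid = left + n/2), which may give a less optimal tree but
  // guarantees that the recursion terminates.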
// // // Compute SAH and find best split axis and position // int min_cut_axis = 0; T cut_pos[3] = {0.0, 0.0, 0.0}; BinBuffer bins(options_.bin_size); ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx, p); FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n, options_.cost_t_aabb); // Try all 3 axis until good cut position avaiable. unsigned int mid_idx = left_idx; int cut_axis = min_cut_axis; for (int axis_try = 0; axis_try < 3; axis_try++) { unsigned int *begin = &indices_[left_idx]; unsigned int *end = &indices_[right_idx - 1] + 1; // mimics end() iterator. unsigned int *mid = 0; // try min_cut_axis first. cut_axis = (min_cut_axis + axis_try) % 3; pred.Set(cut_axis, cut_pos[cut_axis]); // // Split at (cut_axis, cut_pos) // indices_ will be modified. // mid = std::partition(begin, end, pred); mid_idx = left_idx + static_cast<unsigned int>((mid - begin)); if ((mid_idx == left_idx) || (mid_idx == right_idx)) { // Can't split well. // Switch to object median(which may create unoptimized tree, but // stable) mid_idx = left_idx + (n >> 1); // Try another axis to find better cut. } else { // Found good cut. exit loop. break; } } BVHNode<T> node; node.axis = cut_axis; node.flag = 0; // 0 = branch out_nodes->push_back(node); unsigned int left_child_index = 0; unsigned int right_child_index = 0; left_child_index = BuildTree(out_stat, out_nodes, left_idx, mid_idx, depth + 1, p, pred); right_child_index = BuildTree(out_stat, out_nodes, mid_idx, right_idx, depth + 1, p, pred); { (*out_nodes)[offset].data[0] = left_child_index; (*out_nodes)[offset].data[1] = right_child_index; (*out_nodes)[offset].bmin[0] = bmin[0]; (*out_nodes)[offset].bmin[1] = bmin[1]; (*out_nodes)[offset].bmin[2] = bmin[2]; (*out_nodes)[offset].bmax[0] = bmax[0]; (*out_nodes)[offset].bmax[1] = bmax[1]; (*out_nodes)[offset].bmax[2] = bmax[2]; } out_stat->num_branch_nodes++; return offset; } template <typename T> template <class P, class Pred> bool BVHAccel<T>::Build(unsigned int num_primitives, const P &p, const Pred &pred, const BVHBuildOptions<T> &options) { options_ = options; stats_ = BVHBuildStatistics(); nodes_.clear(); bboxes_.clear(); assert(options_.bin_size > 1); if (num_primitives == 0) { return false; } unsigned int n = num_primitives; // // 1. Create triangle indices(this will be permutated in BuildTree) // indices_.resize(n); #if defined(NANORT_USE_CPP11_FEATURE) { size_t num_threads = std::min( size_t(kNANORT_MAX_THREADS), std::max(size_t(1), size_t(std::thread::hardware_concurrency()))); if (n < num_threads) { num_threads = n; } std::vector<std::thread> workers; size_t ndiv = n / num_threads; for (size_t t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&, t]() { size_t si = t * ndiv; size_t ei = std::min((t + 1) * ndiv, size_t(n)); for (size_t k = si; k < ei; k++) { indices_[k] = static_cast<unsigned int>(k); } })); } for (auto &t : workers) { t.join(); } } #else #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < static_cast<int>(n); i++) { indices_[static_cast<size_t>(i)] = static_cast<unsigned int>(i); } #endif // !NANORT_USE_CPP11_FEATURE // // 2. Compute bounding box(optional). 
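  //
  // When options.cache_bbox is true, one AABB per primitive is computed here
  // and stored in bboxes_, so BuildTree() can read cached boxes instead of
  // calling p.BoundingBox() again at every tree level; this trades O(n)
  // extra memory for build speed. Otherwise only the scene bound is
  // computed (threaded, OpenMP or serial depending on the compile flags).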
// real3<T> bmin, bmax; if (options.cache_bbox) { bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max(); bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max(); bboxes_.resize(n); for (size_t i = 0; i < n; i++) { // for each primitived unsigned int idx = indices_[i]; BBox<T> bbox; p.BoundingBox(&(bbox.bmin), &(bbox.bmax), static_cast<unsigned int>(i)); bboxes_[idx] = bbox; for (int k = 0; k < 3; k++) { // xyz if (bmin[k] > bbox.bmin[k]) { bmin[k] = bbox.bmin[k]; } if (bmax[k] < bbox.bmax[k]) { bmax[k] = bbox.bmax[k]; } } } } else { #if defined(NANORT_USE_CPP11_FEATURE) ComputeBoundingBoxThreaded(&bmin, &bmax, &indices_.at(0), 0, n, p); #elif defined(_OPENMP) ComputeBoundingBoxOMP(&bmin, &bmax, &indices_.at(0), 0, n, p); #else ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), 0, n, p); #endif } // // 3. Build tree // #if defined(NANORT_ENABLE_PARALLEL_BUILD) #if defined(NANORT_USE_CPP11_FEATURE) // Do parallel build for enoughly large dataset. if (n > options.min_primitives_for_parallel_build) { BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth, p, pred); // [0, n) assert(shallow_node_infos_.size() > 0); // Build deeper tree in parallel std::vector<std::vector<BVHNode<T> > > local_nodes( shallow_node_infos_.size()); std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size()); size_t num_threads = std::min( size_t(kNANORT_MAX_THREADS), std::max(size_t(1), size_t(std::thread::hardware_concurrency()))); if (shallow_node_infos_.size() < num_threads) { num_threads = shallow_node_infos_.size(); } std::vector<std::thread> workers; std::atomic<uint32_t> i(0); for (size_t t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { uint32_t idx = 0; while ((idx = (i++)) < shallow_node_infos_.size()) { // Create thread-local copy of Pred since some mutable variables are // modified during SAH computation. const Pred local_pred = pred; unsigned int left_idx = shallow_node_infos_[size_t(idx)].left_idx; unsigned int right_idx = shallow_node_infos_[size_t(idx)].right_idx; BuildTree(&(local_stats[size_t(idx)]), &(local_nodes[size_t(idx)]), left_idx, right_idx, options.shallow_depth, p, local_pred); } })); } for (auto &t : workers) { t.join(); } // Join local nodes for (size_t ii = 0; ii < local_nodes.size(); ii++) { assert(!local_nodes[ii].empty()); size_t offset = nodes_.size(); // Add offset to child index(for branch node). for (size_t j = 0; j < local_nodes[ii].size(); j++) { if (local_nodes[ii][j].flag == 0) { // branch local_nodes[ii][j].data[0] += offset - 1; local_nodes[ii][j].data[1] += offset - 1; } } // replace nodes_[shallow_node_infos_[ii].offset] = local_nodes[ii][0]; // Skip root element of the local node. nodes_.insert(nodes_.end(), local_nodes[ii].begin() + 1, local_nodes[ii].end()); } // Join statistics for (size_t ii = 0; ii < local_nodes.size(); ii++) { stats_.max_tree_depth = std::max(stats_.max_tree_depth, local_stats[ii].max_tree_depth); stats_.num_leaf_nodes += local_stats[ii].num_leaf_nodes; stats_.num_branch_nodes += local_stats[ii].num_branch_nodes; } } else { // Single thread. BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #elif defined(_OPENMP) // Do parallel build for enoughly large dataset. 
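  // This OpenMP branch mirrors the C++11-thread branch above: the shallow
  // tree is built serially down to options.shallow_depth, then one
  // BuildTree() call per ShallowNodeInfo runs inside the parallel-for. Each
  // iteration takes a local copy of the predicate (`local_pred`) because
  // Pred::Set() mutates its axis/position members during partitioning.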
if (n > options.min_primitives_for_parallel_build) { BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth, p, pred); // [0, n) assert(shallow_node_infos_.size() > 0); // Build deeper tree in parallel std::vector<std::vector<BVHNode<T> > > local_nodes( shallow_node_infos_.size()); std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size()); #pragma omp parallel for for (int i = 0; i < static_cast<int>(shallow_node_infos_.size()); i++) { unsigned int left_idx = shallow_node_infos_[size_t(i)].left_idx; unsigned int right_idx = shallow_node_infos_[size_t(i)].right_idx; const Pred local_pred = pred; BuildTree(&(local_stats[size_t(i)]), &(local_nodes[size_t(i)]), left_idx, right_idx, options.shallow_depth, p, local_pred); } // Join local nodes for (size_t i = 0; i < local_nodes.size(); i++) { assert(!local_nodes[size_t(i)].empty()); size_t offset = nodes_.size(); // Add offset to child index(for branch node). for (size_t j = 0; j < local_nodes[i].size(); j++) { if (local_nodes[i][j].flag == 0) { // branch local_nodes[i][j].data[0] += offset - 1; local_nodes[i][j].data[1] += offset - 1; } } // replace nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0]; // Skip root element of the local node. nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1, local_nodes[i].end()); } // Join statistics for (size_t i = 0; i < local_nodes.size(); i++) { stats_.max_tree_depth = std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth); stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes; stats_.num_branch_nodes += local_stats[i].num_branch_nodes; } } else { // Single thread BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #else // !NANORT_ENABLE_PARALLEL_BUILD { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #endif #else // !_OPENMP // Single thread BVH build { BuildTree(&stats_, &nodes_, 0, n, /* root depth */ 0, p, pred); // [0, n) } #endif return true; } template <typename T> void BVHAccel<T>::Debug() { for (size_t i = 0; i < indices_.size(); i++) { printf("index[%d] = %d\n", int(i), int(indices_[i])); } for (size_t i = 0; i < nodes_.size(); i++) { printf("node[%d] : bmin %f, %f, %f, bmax %f, %f, %f\n", int(i), nodes_[i].bmin[0], nodes_[i].bmin[1], nodes_[i].bmin[1], nodes_[i].bmax[0], nodes_[i].bmax[1], nodes_[i].bmax[1]); } } #if defined(NANORT_ENABLE_SERIALIZATION) template <typename T> bool BVHAccel<T>::Dump(const char *filename) const { FILE *fp = fopen(filename, "wb"); if (!fp) { // fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename); return false; } size_t numNodes = nodes_.size(); assert(nodes_.size() > 0); size_t numIndices = indices_.size(); size_t r = 0; r = fwrite(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fwrite(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); fclose(fp); return true; } template <typename T> bool BVHAccel<T>::Dump(FILE *fp) const { size_t numNodes = nodes_.size(); assert(nodes_.size() > 0); size_t numIndices = indices_.size(); size_t r = 0; r = fwrite(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fwrite(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); r = fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); return true; } template <typename T> 
bool BVHAccel<T>::Load(const char *filename) { FILE *fp = fopen(filename, "rb"); if (!fp) { // fprintf(stderr, "Cannot open file: %s\n", filename); return false; } size_t numNodes; size_t numIndices; size_t r = 0; r = fread(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); assert(numNodes > 0); nodes_.resize(numNodes); r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fread(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); indices_.resize(numIndices); r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); fclose(fp); return true; } template <typename T> bool BVHAccel<T>::Load(FILE *fp) { size_t numNodes; size_t numIndices; size_t r = 0; r = fread(&numNodes, sizeof(size_t), 1, fp); assert(r == 1); assert(numNodes > 0); nodes_.resize(numNodes); r = fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp); assert(r == numNodes); r = fread(&numIndices, sizeof(size_t), 1, fp); assert(r == 1); indices_.resize(numIndices); r = fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp); assert(r == numIndices); return true; } #endif template <typename T> inline bool IntersectRayAABB(T *tminOut, // [out] T *tmaxOut, // [out] T min_t, T max_t, const T bmin[3], const T bmax[3], real3<T> ray_org, real3<T> ray_inv_dir, int ray_dir_sign[3]); template <> inline bool IntersectRayAABB<float>(float *tminOut, // [out] float *tmaxOut, // [out] float min_t, float max_t, const float bmin[3], const float bmax[3], real3<float> ray_org, real3<float> ray_inv_dir, int ray_dir_sign[3]) { float tmin, tmax; const float min_x = ray_dir_sign[0] ? bmax[0] : bmin[0]; const float min_y = ray_dir_sign[1] ? bmax[1] : bmin[1]; const float min_z = ray_dir_sign[2] ? bmax[2] : bmin[2]; const float max_x = ray_dir_sign[0] ? bmin[0] : bmax[0]; const float max_y = ray_dir_sign[1] ? bmin[1] : bmax[1]; const float max_z = ray_dir_sign[2] ? bmin[2] : bmax[2]; // X const float tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0]; // MaxMult robust BVH traversal(up to 4 ulp). // 1.0000000000000004 for double precision. const float tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.00000024f; // Y const float tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1]; const float tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.00000024f; // Z const float tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2]; const float tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.00000024f; tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t))); tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t))); if (tmin <= tmax) { (*tminOut) = tmin; (*tmaxOut) = tmax; return true; } return false; // no hit } template <> inline bool IntersectRayAABB<double>(double *tminOut, // [out] double *tmaxOut, // [out] double min_t, double max_t, const double bmin[3], const double bmax[3], real3<double> ray_org, real3<double> ray_inv_dir, int ray_dir_sign[3]) { double tmin, tmax; const double min_x = ray_dir_sign[0] ? bmax[0] : bmin[0]; const double min_y = ray_dir_sign[1] ? bmax[1] : bmin[1]; const double min_z = ray_dir_sign[2] ? bmax[2] : bmin[2]; const double max_x = ray_dir_sign[0] ? bmin[0] : bmax[0]; const double max_y = ray_dir_sign[1] ? bmin[1] : bmax[1]; const double max_z = ray_dir_sign[2] ? bmin[2] : bmax[2]; // X const double tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0]; // MaxMult robust BVH traversal(up to 4 ulp). 
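  // Slab test with the robust-traversal tweak from the paper linked earlier
  // in this header: each far-plane distance (plane - org) * inv_dir is
  // multiplied by a constant just above 1 (1.00000024f for float,
  // 1.0000000000000004 for double, roughly 1 + 4 ulp), so accumulated
  // rounding error cannot pull tmax below tmin and reject a genuine hit at
  // an AABB boundary.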
const double tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.0000000000000004; // Y const double tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1]; const double tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.0000000000000004; // Z const double tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2]; const double tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.0000000000000004; tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t))); tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t))); if (tmin <= tmax) { (*tminOut) = tmin; (*tmaxOut) = tmax; return true; } return false; // no hit } template <typename T> template <class I> inline bool BVHAccel<T>::TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; T t = intersector.GetT(); // current hit distance real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T local_t = t; if (intersector.Intersect(&local_t, prim_idx)) { // Update isect state t = local_t; intersector.Update(t, prim_idx); hit = true; } } return hit; } #if 0 // TODO(LTE): Implement template <typename T> template<class I, class H, class Comp> bool BVHAccel<T>::MultiHitTestLeafNode( std::priority_queue<H, std::vector<H>, Comp> *isect_pq, int max_intersections, const BVHNode<T> &node, const Ray<T> &ray, const I &intersector) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; T t = std::numeric_limits<T>::max(); if (isect_pq->size() >= static_cast<size_t>(max_intersections)) { t = isect_pq->top().t; // current furthest hit distance } real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T local_t = t, u = 0.0f, v = 0.0f; if (intersector.Intersect(&local_t, &u, &v, prim_idx)) { // Update isect state if ((local_t > ray.min_t)) { if (isect_pq->size() < static_cast<size_t>(max_intersections)) { H isect; t = local_t; isect.t = t; isect.u = u; isect.v = v; isect.prim_id = prim_idx; isect_pq->push(isect); // Update t to furthest distance. t = ray.max_t; hit = true; } else { if (local_t < isect_pq->top().t) { // delete furthest intersection and add new intersection. isect_pq->pop(); H hit; hit.t = local_t; hit.u = u; hit.v = v; hit.prim_id = prim_idx; isect_pq->push(hit); // Update furthest hit distance t = isect_pq->top().t; hit = true; } } } } } return hit; } #endif template <typename T> template <class I, class H> bool BVHAccel<T>::Traverse(const Ray<T> &ray, const I &intersector, H *isect, const BVHTraceOptions &options) const { const int kMaxStackDepth = 512; (void)kMaxStackDepth; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0; dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0; dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 
1 : 0; real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t = std::numeric_limits<T>::max(); T max_t = -std::numeric_limits<T>::max(); while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[index]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (node.flag == 0) { // branch node if (hit) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } } else { // leaf node if (hit) { if (TestLeafNode(node, ray, intersector)) { hit_t = intersector.GetT(); } } } } assert(node_stack_index < kMaxStackDepth); bool hit = (intersector.GetT() < ray.max_t); intersector.PostTraversal(ray, hit, isect); return hit; } template <typename T> template <class I> inline bool BVHAccel<T>::TestLeafNodeIntersections( const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections, const I &intersector, std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > *isect_pq) const { bool hit = false; unsigned int num_primitives = node.data[0]; unsigned int offset = node.data[1]; real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; intersector.PrepareTraversal(ray); for (unsigned int i = 0; i < num_primitives; i++) { unsigned int prim_idx = indices_[i + offset]; T min_t, max_t; if (intersector.Intersect(&min_t, &max_t, prim_idx)) { // Always add to isect lists. NodeHit<T> isect; isect.t_min = min_t; isect.t_max = max_t; isect.node_id = prim_idx; if (isect_pq->size() < static_cast<size_t>(max_intersections)) { isect_pq->push(isect); } else { if (min_t < isect_pq->top().t_min) { // delete the furthest intersection and add a new intersection. isect_pq->pop(); isect_pq->push(isect); } } } } return hit; } template <typename T> template <class I> bool BVHAccel<T>::ListNodeIntersections( const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<NodeHit<T>, 128> *hits) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >, NodeHitComparator<T> > isect_pq; (*hits)->clear(); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? 1 : 0; dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? 1 : 0; dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? 
1 : 0; real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (node.flag == 0) { // branch node if (hit) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } } else { // leaf node if (hit) { TestLeafNodeIntersections(node, ray, max_intersections, intersector, &isect_pq); } } } assert(node_stack_index < kMaxStackDepth); (void)kMaxStackDepth; if (!isect_pq.empty()) { // Store intesection in reverse order(make it frontmost order) size_t n = isect_pq.size(); (*hits)->resize(n); for (size_t i = 0; i < n; i++) { const NodeHit<T> &isect = isect_pq.top(); (*hits)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #if 0 // TODO(LTE): Implement template <typename T> template<class I, class H, class Comp> bool BVHAccel<T>::MultiHitTraverse(const Ray<T> &ray, int max_intersections, const I &intersector, StackVector<H, 128> *hits, const BVHTraceOptions& options) const { const int kMaxStackDepth = 512; T hit_t = ray.max_t; int node_stack_index = 0; unsigned int node_stack[512]; node_stack[0] = 0; // Stores furthest intersection at top std::priority_queue<H, std::vector<H>, Comp> isect_pq; (*hits)->clear(); // Init isect info as no hit intersector.Update(hit_t, static_cast<unsigned int>(-1)); intersector.PrepareTraversal(ray, options); int dir_sign[3]; dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0); real3<T> ray_inv_dir; real3<T> ray_dir; ray_dir[0] = ray.dir[0]; ray_dir[1] = ray.dir[1]; ray_dir[2] = ray.dir[2]; ray_inv_dir = vsafe_inverse(ray_dir); real3<T> ray_org; ray_org[0] = ray.org[0]; ray_org[1] = ray.org[1]; ray_org[2] = ray.org[2]; T min_t, max_t; while (node_stack_index >= 0) { unsigned int index = node_stack[node_stack_index]; const BVHNode<T> &node = nodes_[static_cast<size_t>(index)]; node_stack_index--; bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin, node.bmax, ray_org, ray_inv_dir, dir_sign); if (node.flag == 0) { // branch node if (hit) { int order_near = dir_sign[node.axis]; int order_far = 1 - order_near; // Traverse near first. node_stack[++node_stack_index] = node.data[order_far]; node_stack[++node_stack_index] = node.data[order_near]; } } else { // leaf node if (hit) { if (MultiHitTestLeafNode(&isect_pq, max_intersections, node, ray, intersector)) { // Only update `hit_t` when queue is full. 
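          // Rationale: the priority queue keeps the `max_intersections`
          // nearest hits with the farthest one on top. While it is not yet
          // full, every hit must be accepted, so the traversal interval
          // stays [min_t, max_t]; once it is full, hit_t shrinks to the
          // farthest kept hit and more distant nodes/primitives get culled.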
if (isect_pq.size() >= static_cast<size_t>(max_intersections)) { hit_t = isect_pq.top().t; } } } } } assert(node_stack_index < kMaxStackDepth); (void)kMaxStackDepth; if (!isect_pq.empty()) { // Store intersection in reverse order (make it frontmost order) size_t n = isect_pq.size(); (*hits)->resize(n); for (size_t i = 0; i < n; i++) { const H &isect = isect_pq.top(); (*hits)[n - i - 1] = isect; isect_pq.pop(); } return true; } return false; } #endif #ifdef __clang__ #pragma clang diagnostic pop #endif } // namespace nanort #endif // NANORT_H_
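// ----------------------------------------------------------------------------
// Usage sketch (kept as a comment; `vertices`, `faces` and `num_faces` are
// caller-provided placeholders: 3 floats per vertex, 3 unsigned ints per
// face):
//
//   nanort::TriangleMesh<float>    mesh(vertices, faces, sizeof(float) * 3);
//   nanort::TriangleSAHPred<float> pred(vertices, faces, sizeof(float) * 3);
//   nanort::BVHAccel<float> accel;
//   accel.Build(num_faces, mesh, pred, nanort::BVHBuildOptions<float>());
//
//   nanort::Ray<float> ray;          // defaults: min_t = 0, max_t = FLT_MAX
//   ray.org[0] = 0.0f; ray.org[1] = 0.0f; ray.org[2] = 2.0f;
//   ray.dir[0] = 0.0f; ray.dir[1] = 0.0f; ray.dir[2] = -1.0f;
//
//   nanort::TriangleIntersector<float> isector(vertices, faces,
//                                              sizeof(float) * 3);
//   nanort::TriangleIntersection<float> isect;
//   if (accel.Traverse(ray, isector, &isect)) {
//     // isect.t, isect.u, isect.v and isect.prim_id describe the nearest hit.
//   }
// ----------------------------------------------------------------------------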
convolution_sgemm.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __AVX__ static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size) { const float* kernel = _kernel; // kernel memory packed 8 x 8 kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; const float* k0 = kernel + (p+0)*inch*kernel_size; const float* k1 = kernel + (p+1)*inch*kernel_size; const float* k2 = kernel + (p+2)*inch*kernel_size; const float* k3 = kernel + (p+3)*inch*kernel_size; const float* k4 = kernel + (p+4)*inch*kernel_size; const float* k5 = kernel + (p+5)*inch*kernel_size; const float* k6 = kernel + (p+6)*inch*kernel_size; const float* k7 = kernel + (p+7)*inch*kernel_size; float* ktmp = kernel_tm.channel(p/8); for (int q=0; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; const float* k0 = kernel + (p+0)*inch*kernel_size; const float* k1 = kernel + (p+1)*inch*kernel_size; const float* k2 = kernel + (p+2)*inch*kernel_size; const float* k3 = kernel + (p+3)*inch*kernel_size; float* ktmp = kernel_tm.channel(p/8 + (p%8)/4); for (int q=0; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { const float* k0 = kernel + (p+0)*inch*kernel_size; float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4); for (int q=0; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } static void conv_im2col_sgemm_sse(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // im2col Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator); { const int stride = kernel_h*kernel_w*outw*outh; float* ret = (float*)bottom_im2col; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<inch; p++) { const float* input = bottom_blob.channel(p); int retID = stride * p; for (int u=0; u<kernel_h; u++) { for (int v=0; v<kernel_w; v++) { for (int i=0; i<outh; i++) { for (int j=0; j<outw; j++) { int row = u + i * stride_h; int col = v + j * stride_w; int index 
= row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 8 x 8 Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8); for (int q=0; q<inch*kernel_size; q++) { #if __AVX__ _mm256_storeu_ps(tmpptr, _mm256_loadu_ps(img0)); #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; #endif // __SSE__ tmpptr += 8; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8 + i%8); for (int q=0; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 8; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); float* output4 = top_blob.channel(i+4); float* output5 = top_blob.channel(i+5); float* output6 = top_blob.channel(i+6); float* output7 = top_blob.channel(i+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(biasptr); __m256 _sum1 = _mm256_broadcast_ss(biasptr+1); __m256 _sum2 = _mm256_broadcast_ss(biasptr+2); __m256 _sum3 = _mm256_broadcast_ss(biasptr+3); __m256 _sum4 = _mm256_broadcast_ss(biasptr+4); __m256 _sum5 = _mm256_broadcast_ss(biasptr+5); __m256 _sum6 = _mm256_broadcast_ss(biasptr+6); __m256 _sum7 = _mm256_broadcast_ss(biasptr+7); int k=0; for (; k+3<L; k=k+4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb+8); __m256 _vb2 = _mm256_loadu_ps(vb+16); __m256 _vb3 = _mm256_loadu_ps(vb+24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _va0 = _mm256_broadcast_ss(va+4); _va1 = _mm256_broadcast_ss(va+5); _va2 = _mm256_broadcast_ss(va+6); _va3 = _mm256_broadcast_ss(va+7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70 va += 8; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 _va0 = _mm256_broadcast_ss(va+4); _va1 = _mm256_broadcast_ss(va+5); _va2 = _mm256_broadcast_ss(va+6); _va3 = _mm256_broadcast_ss(va+7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71 va += 8; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 _va0 = _mm256_broadcast_ss(va+4); _va1 = _mm256_broadcast_ss(va+5); _va2 = _mm256_broadcast_ss(va+6); _va3 = _mm256_broadcast_ss(va+7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62 _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72 va += 8; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) 
* k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 _va0 = _mm256_broadcast_ss(va+4); _va1 = _mm256_broadcast_ss(va+5); _va2 = _mm256_broadcast_ss(va+6); _va3 = _mm256_broadcast_ss(va+7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53 _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73 va += 8; vb += 32; } for (; k<L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _va4 = _mm256_broadcast_ss(va+4); __m256 _va5 = _mm256_broadcast_ss(va+5); __m256 _va6 = _mm256_broadcast_ss(va+6); __m256 _va7 = _mm256_broadcast_ss(va+7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); _mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; va += 8; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; sum4[n] += va[4] * vb[n+8]; sum5[n] += va[5] * vb[n+8]; sum6[n] += va[6] * vb[n+8]; sum7[n] += va[7] * vb[n+8]; va += 8; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; sum4[n] += va[4] * vb[n+16]; sum5[n] += va[5] * vb[n+16]; sum6[n] += va[6] * vb[n+16]; sum7[n] += va[7] * vb[n+16]; va += 8; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; sum4[n] += va[4] * vb[n+24]; sum5[n] += va[5] * vb[n+24]; sum6[n] += va[6] * vb[n+24]; sum7[n] += va[7] * vb[n+24]; va += 8; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; sum4[n] += va[4] * vb[n+32]; sum5[n] += va[5] * vb[n+32]; sum6[n] += va[6] * vb[n+32]; sum7[n] += va[7] * vb[n+32]; va += 8; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; sum4[n] += va[4] * vb[n+40]; sum5[n] += va[5] * vb[n+40]; sum6[n] += va[6] * vb[n+40]; sum7[n] += va[7] * vb[n+40]; va += 8; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * 
vb[n+48]; sum3[n] += va[3] * vb[n+48]; sum4[n] += va[4] * vb[n+48]; sum5[n] += va[5] * vb[n+48]; sum6[n] += va[6] * vb[n+48]; sum7[n] += va[7] * vb[n+48]; va += 8; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; sum4[n] += va[4] * vb[n+56]; sum5[n] += va[5] * vb[n+56]; sum6[n] += va[6] * vb[n+56]; sum7[n] += va[7] * vb[n+56]; va -= 56; } va += 64; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; output4[n] = sum4[n] + biasptr[4]; output5[n] = sum5[n] + biasptr[5]; output6[n] = sum6[n] + biasptr[6]; output7[n] = sum7[n] + biasptr[7]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8); #if __AVX__ __m256 _sum0_7 = _mm256_loadu_ps(biasptr); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k=0; for (; k+3<L; k=k+4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb+1); __m256 _vb2 = _mm256_broadcast_ss(vb+2); __m256 _vb3 = _mm256_broadcast_ss(vb+3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va+8); __m256 _va2 = _mm256_loadu_ps(va+16); __m256 _va3 = _mm256_loadu_ps(va+24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);// sum0 += (k00-k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1);// sum1 += (k01-k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2);// sum2 += (k02-k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3);// sum3 += (k03-k73) * a30 va += 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k<L; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7);// sum0 += (k00-k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0.f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; float sum4 = biasptr[4]; float sum5 = biasptr[5]; float sum6 = biasptr[6]; float sum7 = biasptr[7]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __AVX__ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for 
num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8 + (i%8)/4); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(biasptr); __m256 _sum1 = _mm256_broadcast_ss(biasptr+1); __m256 _sum2 = _mm256_broadcast_ss(biasptr+2); __m256 _sum3 = _mm256_broadcast_ss(biasptr+3); int k=0; for (; k+3<L; k=k+4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb+8); __m256 _vb2 = _mm256_loadu_ps(vb+16); __m256 _vb3 = _mm256_loadu_ps(vb+24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 va += 4; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 va += 4; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 va += 4; vb += 32; } for (; k<L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; vb += 4; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += 
va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; va += 4; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; va += 4; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; va += 4; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; va += 4; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; va += 4; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; va += 4; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; va -= 28; } va += 32; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8 + (i%8)/4); #if __AVX__ __m128 _sum0_3 = _mm_loadu_ps(biasptr); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k=0; for (; k+3<L; k=k+4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va+4); __m128 _va2 = _mm_loadu_ps(va+8); __m128 _va3 = _mm_loadu_ps(va+12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0);// sum0 += (k00-k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1);// sum1 += (k01-k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2);// sum2 += (k02-k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3);// sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k<L; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3);// sum0 += (k00-k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0.f}; _mm_storeu_ps(output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __AVX__ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(&bias0); int k=0; for (; k+3<L; k=k+4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb+8); __m256 _vb2 = _mm256_loadu_ps(vb+16); __m256 _vb3 = _mm256_loadu_ps(vb+24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03 va += 4; vb += 32; } for (; k<L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 va += 1; vb += 4; } _mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n+8]; sum[n] += va[2] * vb[n+16]; sum[n] += va[3] * vb[n+24]; sum[n] += va[4] * vb[n+32]; sum[n] += va[5] * vb[n+40]; sum[n] += va[6] * vb[n+48]; sum[n] += va[7] * vb[n+56]; } va += 8; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n=0; n<8; n++) { output[n] = sum[n] + bias0; } #endif // __AVX__ output += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); int k=0; #if __AVX__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k+3<L; k+=4) { __m128 _p0 = _mm_loadu_ps(vb); vb += 4; __m128 _k0 = _mm_loadu_ps(va); va += 4; _sum0 = _mm_fmadd_ps(_p0, _k0, _sum0); } float output_sum0[4] = {0.f}; _mm_storeu_ps(output_sum0, _sum0); float sum0 = bias0 + output_sum0[0] + output_sum0[1] + output_sum0[2] + output_sum0[3]; #else float sum0 = bias0; #endif // __AVX__ for (; k<L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } } #else static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size) { const float* kernel = _kernel; // kernel memory packed 4 x 4 kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; const float* k0 = kernel + (p+0)*inch*kernel_size; const float* k1 = kernel + (p+1)*inch*kernel_size; const float* k2 = kernel + (p+2)*inch*kernel_size; const float* k3 = kernel + (p+3)*inch*kernel_size; float* ktmp = kernel_tm.channel(p/4); for (int q=0; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p=remain_outch_start; p<outch; p++) { const float* k0 = kernel + (p+0)*inch*kernel_size; float* ktmp = kernel_tm.channel(p/4 + p%4); for (int q=0; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } static void conv_im2col_sgemm_sse(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int outw = 
top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // im2col Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator); { const int stride = kernel_h*kernel_w*outw*outh; float* ret = (float*)bottom_im2col; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<inch; p++) { const float* input = bottom_blob.channel(p); int retID = stride * p; for (int u=0; u<kernel_h; u++) { for (int v=0; v<kernel_w; v++) { for (int i=0; i<outh; i++) { for (int j=0; j<outw; j++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 4 x 4 Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 4; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/4); for (int q=0; q<inch*kernel_size; q++) { #if __SSE__ _mm_storeu_ps(tmpptr, _mm_loadu_ps(img0)); #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; #endif // __SSE__ tmpptr += 4; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/4 + i%4); for (int q=0; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j=0; for (; j+3<N; j=j+4) { const float* vb = bottom_tm.channel(j/4); const float* va = kernel_tm.channel(i/4); #if __SSE__ __m128 _sum0 = _mm_set1_ps(biasptr[0]); __m128 _sum1 = _mm_set1_ps(biasptr[1]); __m128 _sum2 = _mm_set1_ps(biasptr[2]); __m128 _sum3 = _mm_set1_ps(biasptr[3]); int k=0; for (; k+3<L; k=k+4) { // k0 __m128 _vb = _mm_loadu_ps(vb); __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30 // k1 _vb = _mm_loadu_ps(vb+4); _va0 = _mm_set1_ps(va[4]); _va1 = _mm_set1_ps(va[5]); _va2 = _mm_set1_ps(va[6]); _va3 = _mm_set1_ps(va[7]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a10-a13) * k01 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a10-a13) * k11 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a10-a13) * k21 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a10-a13) * k31 // k2 _vb = _mm_loadu_ps(vb+8); _va0 = _mm_set1_ps(va[8]); _va1 = _mm_set1_ps(va[9]); _va2 = _mm_set1_ps(va[10]); _va3 = _mm_set1_ps(va[11]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a20-a23) * k02 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a20-a23) * k12 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a20-a23) * k22 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a20-a23) * k32 // k3 _vb = _mm_loadu_ps(vb+12); _va0 = _mm_set1_ps(va[12]); _va1 = _mm_set1_ps(va[13]); _va2 = _mm_set1_ps(va[14]); _va3 = _mm_set1_ps(va[15]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a30-a33) * k03 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a30-a33) * k13 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a30-a33) * k23 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a30-a33) * k33 va += 16; vb += 16; } for (; k<L; k++) { // k0 __m128 _vb = _mm_loadu_ps(vb); __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30 va += 4; vb += 4; } _mm_storeu_ps(output0, _sum0); _mm_storeu_ps(output1, _sum1); _mm_storeu_ps(output2, _sum2); _mm_storeu_ps(output3, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<4; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] * vb[n+4]; sum1[n] += va[1] * vb[n+4]; sum2[n] += va[2] * vb[n+4]; sum3[n] += va[3] * vb[n+4]; va += 4; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; va += 4; sum0[n] += va[0] * vb[n+12]; sum1[n] += va[1] * vb[n+12]; sum2[n] += va[2] * vb[n+12]; sum3[n] += va[3] * vb[n+12]; va += 4; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; va += 4; 
sum0[n] += va[0] * vb[n+20]; sum1[n] += va[1] * vb[n+20]; sum2[n] += va[2] * vb[n+20]; sum3[n] += va[3] * vb[n+20]; va += 4; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; va += 4; sum0[n] += va[0] * vb[n+28]; sum1[n] += va[1] * vb[n+28]; sum2[n] += va[2] * vb[n+28]; sum3[n] += va[3] * vb[n+28]; va -= 28; } va += 32; vb += 32; } for (; k<L; k++) { for (int n=0; n<4; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 4; } for (int n=0; n<4; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __SSE__ output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/4 + j%4); const float* va = kernel_tm.channel(i/4); #if __SSE__ __m128 _sum0_3 = _mm_loadu_ps(biasptr); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k=0; for (; k+3<L; k=k+4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va+4); __m128 _va2 = _mm_loadu_ps(va+8); __m128 _va3 = _mm_loadu_ps(va+12); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));// sum0 += (k00-k30) * a00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1));// sum1 += (k01-k31) * a10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2));// sum2 += (k02-k32) * a20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3));// sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k<L; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0));// sum0 += (k00-k30) * a00 va += 4; vb += 1; } output0[0] = _sum0_3[0]; output1[0] = _sum0_3[1]; output2[0] = _sum0_3[2]; output3[0] = _sum0_3[3]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __SSE__ output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; int j=0; for (; j+3<N; j=j+4) { const float* vb = bottom_tm.channel(j/4); const float* va = kernel_tm.channel(i/4 + i%4); #if __SSE__ __m128 _sum0 = _mm_set1_ps(bias0); int k=0; for (; k+3<L; k=k+4) { // k0 __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); __m128 _vb0 = _mm_loadu_ps(vb); __m128 _vb1 = _mm_loadu_ps(vb+4); __m128 _vb2 = _mm_loadu_ps(vb+8); __m128 _vb3 = _mm_loadu_ps(vb+12); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0));// sum0 = (a00-a03) * k00 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1));// sum0 += (a10-a13) * k01 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2));// sum0 += (a20-a23) * k02 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3));// sum0 += (a30-a33) * k03 va += 4; vb += 16; } for (; k<L; k++) { // k0 __m128 _va0 = _mm_set1_ps(va[0]); __m128 _vb0 = _mm_loadu_ps(vb); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00 va += 1; vb += 4; } _mm_storeu_ps(output, _sum0); #else float sum[4] = {0}; int k=0; for (; k+3<L; k=k+4) { for (int n=0; n<4; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n+4]; sum[n] += va[2] * vb[n+8]; sum[n] += va[3] * vb[n+12]; sum[n] += va[4] * vb[n+16]; sum[n] += va[5] * vb[n+20]; sum[n] += va[6] * vb[n+24]; sum[n] += va[7] * vb[n+28]; } va += 8; vb += 32; } for (; k<L; k++) { for (int n=0; n<4; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 4; } for (int n=0; n<4; n++) { output[n] = sum[n] + bias0; } #endif // __SSE__ output += 4; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/4 + j%4); const float* va = kernel_tm.channel(i/4 + i%4); int k=0; #if __SSE__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k+3<L; k+=4) { __m128 _p0 = _mm_loadu_ps(vb); __m128 _k0 = _mm_loadu_ps(va); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0)); va += 4; vb += 4; } float sum0 = bias0 + _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3]; #else float sum0 = bias0; #endif // __SSE__ for (; k<L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } } #endif
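
// --------------------------------------------------------------------------------
// Illustrative reference sketch (not part of ncnn): a plain convolution that
// computes the same result as conv_im2col_sgemm_sse above, without the
// 8x8/4x4 packing or SIMD, useful for validating the packed kernels.
// The function name conv_im2col_sgemm_reference and its flat-pointer
// interface are assumptions made for this sketch only.
// Layouts assumed: bottom is inch x h x w, kernel is outch x inch x kh x kw,
// top is outch x outh x outw, all contiguous floats.
static void conv_im2col_sgemm_reference(const float* bottom, float* top, const float* kernel,
                                        const float* bias, int w, int h, int inch, int outch,
                                        int kernel_w, int kernel_h, int stride_w, int stride_h)
{
    const int outw = (w - kernel_w) / stride_w + 1;
    const int outh = (h - kernel_h) / stride_h + 1;

    for (int p = 0; p < outch; p++)
    {
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = bias ? bias[p] : 0.f;

                // L = inch * kernel_h * kernel_w multiply-accumulates per output,
                // the same inner dimension the packed sgemm loops above iterate over.
                for (int q = 0; q < inch; q++)
                    for (int u = 0; u < kernel_h; u++)
                        for (int v = 0; v < kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            sum += bottom[(q * h + row) * w + col]
                                 * kernel[((p * inch + q) * kernel_h + u) * kernel_w + v];
                        }

                top[(p * outh + i) * outw + j] = sum;
            }
        }
    }
}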
conduct.h
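/* Accumulates the total conductance of graph G over four node communities
 * (G_member[u] holds the community id of node u).  For each community i the
 * loops below compute vol_in (__S2: sum of degrees inside i), vol_out (__S3:
 * sum of degrees outside i) and cut (__S4: edges leaving i); the contribution
 * added to __conduct is cut / min(vol_in, vol_out), or just cut when that
 * minimum is zero.  Timing is recorded via inittracking()/endtracking(). */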
float __conduct = 0; int32_t *G_member; void outputConduct(graph *G) { printf("sum C = %lf\n", __conduct); } void conduct(graph *G) { inittracking("conduct.csv"); __conduct = 0; for (int i = 0; i < 4; i++) { float m = 0.0 ; int32_t __S2 = 0 ; int32_t __S3 = 0 ; int32_t __S4 = 0 ; #pragma omp parallel { int32_t __S2_prv = 0 ; __S2_prv = 0 ; #if defined(PARFOR_GUIDED) #pragma omp for schedule(guided, PAR_CHUNKSIZE) #elif defined(PARFOR_DYNAMIC) #pragma omp for schedule(dynamic, PAR_CHUNKSIZE) #elif defined(TASKLOOP_DEFINED) #pragma omp taskloop num_tasks(NUM_TASKS) #else #pragma omp for schedule(static) #endif for (node_t u = 0; u < G->numNodes; u ++) if ((G_member[u] == i)) { __S2_prv = __S2_prv + (G->begin[u+1] - G->begin[u]) ; } #pragma omp atomic __S2 += __S2_prv; } #pragma omp parallel { int32_t __S3_prv = 0 ; __S3_prv = 0 ; #if defined(PARFOR_GUIDED) #pragma omp for schedule(guided, PAR_CHUNKSIZE) #elif defined(PARFOR_DYNAMIC) #pragma omp for schedule(dynamic, PAR_CHUNKSIZE) #elif defined(TASKLOOP_DEFINED) #pragma omp taskloop num_tasks(NUM_TASKS) #else #pragma omp for schedule(static) #endif for (node_t u0 = 0; u0 < G->numNodes; u0 ++) if ((G_member[u0] != i)) { __S3_prv = __S3_prv + (G->begin[u0+1] - G->begin[u0]) ; } #pragma omp atomic __S3 += __S3_prv; } #pragma omp parallel { int32_t __S4_prv = 0 ; __S4_prv = 0 ; #if defined(PARFOR_GUIDED) #pragma omp for schedule(guided, PAR_CHUNKSIZE) #elif defined(PARFOR_DYNAMIC) #pragma omp for schedule(dynamic, PAR_CHUNKSIZE) #elif defined(TASKLOOP_DEFINED) #pragma omp taskloop num_tasks(NUM_TASKS) #else #pragma omp for schedule(static) #endif for (node_t u1 = 0; u1 < G->numNodes; u1 ++) if ((G_member[u1] == i)) { for (edge_t j_idx = G->begin[u1];j_idx < G->begin[u1+1] ; j_idx ++) { node_t j = G->node_idx [j_idx]; if ((G_member[j] != i)) { __S4_prv = __S4_prv + 1 ; } } } #pragma omp atomic __S4 += __S4_prv; } m = (float)((__S2 < __S3)?__S2:__S3) ; if(m == 0) { __conduct += __S4; } else { __conduct += __S4/m; } } endtracking(); }
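
/* Reference sketch (an assumption, not part of the generated code above): a
 * serial routine computing the conductance of a single community c,
 *     phi(c) = cut(c) / min(vol(c), vol(V \ c)),
 * mirroring what the three parallel reductions in conduct() compute.  The
 * name conductance_serial and the member[] parameter are made up for this
 * sketch; conduct() reads the global G_member instead. */
static float conductance_serial(graph *G, const int32_t *member, int32_t c)
{
    int32_t vol_in = 0;   /* __S2: degrees inside c  */
    int32_t vol_out = 0;  /* __S3: degrees outside c */
    int32_t cut = 0;      /* __S4: edges leaving c   */

    for (node_t u = 0; u < G->numNodes; u++)
    {
        int32_t deg = G->begin[u + 1] - G->begin[u];
        if (member[u] == c)
        {
            vol_in += deg;
            for (edge_t e = G->begin[u]; e < G->begin[u + 1]; e++)
                if (member[G->node_idx[e]] != c)
                    cut++;
        }
        else
            vol_out += deg;
    }

    float m = (float) (vol_in < vol_out ? vol_in : vol_out);
    return (m == 0) ? (float) cut : (float) cut / m;
}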
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType,ExceptionInfo *); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* Typedef declarations */ struct _ProfileInfo { char *name; size_t length; unsigned char *info; size_t signature; }; typedef struct _CMSExceptionInfo { Image *image; ExceptionInfo *exception; } CMSExceptionInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageProfiles() clones one or more image profiles. % % The format of the CloneImageProfiles method is: % % MagickBooleanType CloneImageProfiles(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. 
% */ MagickExport MagickBooleanType CloneImageProfiles(Image *image, const Image *clone_image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clone_image != (const Image *) NULL); assert(clone_image->signature == MagickCoreSignature); if (clone_image->profiles != (void *) NULL) { if (image->profiles != (void *) NULL) DestroyImageProfiles(image); image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles, (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageProfile() deletes a profile from the image by its name. % % The format of the DeleteImageProfile method is: % % MagickBooleanTyupe DeleteImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return(MagickFalse); WriteTo8BimProfile(image,name,(StringInfo *) NULL); return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageProfiles() releases memory associated with an image profile map. % % The format of the DestroyProfiles method is: % % void DestroyImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageProfiles(Image *image) { if (image->profiles != (SplayTreeInfo *) NULL) image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageProfile() gets a profile associated with an image by name. % % The format of the GetImageProfile method is: % % const StringInfo *GetImageProfile(const Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
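%
%  A minimal usage sketch, assuming a valid image variable; "icc" is the
%  conventional profile key and icc_profile is a local of this sketch only:
%
%      const StringInfo
%        *icc_profile;
%
%      icc_profile=GetImageProfile(image,"icc");
%      if (icc_profile != (const StringInfo *) NULL)
%        (void) LogMagickEvent(TraceEvent,GetMagickModule(),
%          "ICC profile: %.20g bytes",(double) GetStringInfoLength(icc_profile));
%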
% */ MagickExport const StringInfo *GetImageProfile(const Image *image, const char *name) { const StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageProfile() gets the next profile name for an image. % % The format of the GetNextImageProfile method is: % % char *GetNextImageProfile(const Image *image) % % A description of each parameter follows: % % o hash_info: the hash info. % */ MagickExport char *GetNextImageProfile(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((char *) NULL); return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r o f i l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ProfileImage() associates, applies, or removes an ICM, IPTC, or generic % profile with / to / from an image. If the profile is NULL, it is removed % from the image otherwise added or applied. Use a name of '*' and a profile % of NULL to remove all profiles from the image. % % ICC and ICM profiles are handled as follows: If the image does not have % an associated color profile, the one you provide is associated with the % image and the image pixels are not transformed. Otherwise, the colorspace % transform defined by the existing and new profile are applied to the image % pixels and the new profile is associated with the image. % % The format of the ProfileImage method is: % % MagickBooleanType ProfileImage(Image *image,const char *name, % const void *datum,const size_t length,const MagickBooleanType clone) % % A description of each parameter follows: % % o image: the image. % % o name: Name of profile to add or remove: ICC, IPTC, or generic profile. % % o datum: the profile data. % % o length: the length of the profile. % % o clone: should be MagickFalse. 
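%
%  A minimal usage sketch following the method format documented above,
%  assuming valid image and exception variables and an sRGB.icc file on disk
%  (both the path and the removal step are assumptions of this sketch):
%  attach an ICC profile read from disk, then strip every profile again:
%
%      StringInfo
%        *icc;
%
%      icc=FileToStringInfo("sRGB.icc",~0UL,exception);
%      if (icc != (StringInfo *) NULL)
%        {
%          (void) ProfileImage(image,"ICC",GetStringInfoDatum(icc),
%            GetStringInfoLength(icc),MagickFalse);
%          icc=DestroyStringInfo(icc);
%        }
%      (void) ProfileImage(image,"*",(const void *) NULL,0,MagickFalse);
%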
% */ #if defined(MAGICKCORE_LCMS_DELEGATE) typedef struct _LCMSInfo { ColorspaceType colorspace; cmsUInt32Number type; size_t channels; cmsHPROFILE profile; int intent; double scale, translate; void **magick_restrict pixels; } LCMSInfo; #if LCMS_VERSION < 2060 static void* cmsGetContextUserData(cmsContext ContextID) { return(ContextID); } static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData) { magick_unreferenced(Plugin); return((cmsContext) UserData); } static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID), cmsLogErrorHandlerFunction Fn) { magick_unreferenced(ContextID); cmsSetLogErrorHandler(Fn); } static void cmsDeleteContext(cmsContext magick_unused(ContextID)) { magick_unreferenced(ContextID); } #endif static void **DestroyPixelThreadSet(void **pixels) { register ssize_t i; if (pixels == (void **) NULL) return((void **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (void *) NULL) pixels[i]=RelinquishMagickMemory(pixels[i]); pixels=(void **) RelinquishMagickMemory(pixels); return(pixels); } static void **AcquirePixelThreadSet(const size_t columns, const size_t channels,MagickBooleanType highres) { register ssize_t i; size_t number_threads; size_t size; void **pixels; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels)); if (pixels == (void **) NULL) return((void **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); size=sizeof(double); if (highres == MagickFalse) size=sizeof(Quantum); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=AcquireQuantumMemory(columns,channels*size); if (pixels[i] == (void *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform) { register ssize_t i; assert(transform != (cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info, const LCMSInfo *target_info,const cmsUInt32Number flags, cmsContext cms_context) { cmsHTRANSFORM *transform; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) memset(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile, source_info->type,target_info->profile,target_info->type, target_info->intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { CMSExceptionInfo *cms_exception; ExceptionInfo *exception; Image *image; cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context); if (cms_exception == (CMSExceptionInfo *) NULL) return; exception=cms_exception->exception; if (exception == (ExceptionInfo *) NULL) return; image=cms_exception->image; if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'","unknown context"); 
return; } if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s', %s (#%u)",image->filename, message != (char *) NULL ? message : "no message",severity); } static void TransformDoublePixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { #define GetLCMSPixel(source_info,pixel) \ (source_info->scale*QuantumScale*(pixel)+source_info->translate) #define SetLCMSPixel(target_info,pixel) \ ClampToQuantum(target_info->scale*QuantumRange*(pixel)+target_info->translate) register double *p; register ssize_t x; p=(double *) source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetLCMSPixel(source_info,GetPixelRed(image,q)); if (source_info->channels > 1) { *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q)); *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q)); } if (source_info->channels > 3) *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q)); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id], target_info->pixels[id],(unsigned int) image->columns); p=(double *) target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,SetLCMSPixel(target_info,*p),q); else SetPixelRed(image,SetLCMSPixel(target_info,*p),q); p++; if (target_info->channels > 1) { SetPixelGreen(image,SetLCMSPixel(target_info,*p),q); p++; SetPixelBlue(image,SetLCMSPixel(target_info,*p),q); p++; } if (target_info->channels > 3) { SetPixelBlack(image,SetLCMSPixel(target_info,*p),q); p++; } q+=GetPixelChannels(image); } } static void TransformQuantumPixels(const int id,const Image* image, const LCMSInfo *source_info,const LCMSInfo *target_info, const cmsHTRANSFORM *transform,Quantum *q) { register Quantum *p; register ssize_t x; p=(Quantum *) source_info->pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=GetPixelRed(image,q); if (source_info->channels > 1) { *p++=GetPixelGreen(image,q); *p++=GetPixelBlue(image,q); } if (source_info->channels > 3) *p++=GetPixelBlack(image,q); q+=GetPixelChannels(image); } cmsDoTransform(transform[id],source_info->pixels[id], target_info->pixels[id],(unsigned int) image->columns); p=(Quantum *) target_info->pixels[id]; q-=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { if (target_info->channels == 1) SetPixelGray(image,*p++,q); else SetPixelRed(image,*p++,q); if (target_info->channels > 1) { SetPixelGreen(image,*p++,q); SetPixelBlue(image,*p++,q); } if (target_info->channels > 3) SetPixelBlack(image,*p++,q); q+=GetPixelChannels(image); } } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 
0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 
0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 
0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 
0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #ifndef TYPE_XYZ_8 #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1)) #endif #define ThrowProfileException(severity,tag,context) \ { \ if (profile != (StringInfo *) NULL) \ profile=DestroyStringInfo(profile); \ if (cms_context != (cmsContext) NULL) \ cmsDeleteContext(cms_context); \ if (source_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_info.profile); \ if (target_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_info.profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. 
*/ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile,exception); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace",exception); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image,exception); value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image,exception); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsContext cms_context; CMSExceptionInfo cms_exception; LCMSInfo source_info, target_info; /* Transform pixel colors as defined by the color profiles. */ cms_exception.image=image; cms_exception.exception=exception; cms_context=cmsCreateContext(NULL,&cms_exception); if (cms_context == (cmsContext) NULL) ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler); source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_info.profile == (cmsHPROFILE) NULL) { cmsDeleteContext(cms_context); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; cmsColorSpaceSignature signature; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags; #if !defined(MAGICKCORE_HDRI_SUPPORT) const char *artifact; #endif MagickBooleanType highres; MagickOffsetType progress; ssize_t y; target_info.profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_info.profile=source_info.profile; source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(icc_profile), (cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_info.profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } highres=MagickTrue; #if !defined(MAGICKCORE_HDRI_SUPPORT) artifact=GetImageArtifact(image,"profile:highres-transform"); if (IsStringFalse(artifact) != MagickFalse) highres=MagickFalse; #endif source_info.scale=1.0; source_info.translate=0.0; source_info.colorspace=sRGBColorspace; source_info.channels=3; switch (cmsGetColorSpace(source_info.profile)) { case cmsSigCmykData: { source_info.colorspace=CMYKColorspace; source_info.channels=4; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_CMYK_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_CMYK_16; else #endif { source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; source_info.scale=100.0; } break; } case cmsSigGrayData: { source_info.colorspace=GRAYColorspace; 
source_info.channels=1; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_16; else #endif source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; break; } case cmsSigLabData: { source_info.colorspace=LabColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_Lab_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_Lab_16; else #endif { source_info.type=(cmsUInt32Number) TYPE_Lab_DBL; source_info.scale=100.0; source_info.translate=(-0.5); } break; } case cmsSigRgbData: { source_info.colorspace=sRGBColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_16; else #endif source_info.type=(cmsUInt32Number) TYPE_RGB_DBL; break; } case cmsSigXYZData: { source_info.colorspace=XYZColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_16; else #endif source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } signature=cmsGetPCS(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_info.profile); target_info.scale=1.0; target_info.translate=0.0; target_info.channels=3; switch (signature) { case cmsSigCmykData: { target_info.colorspace=CMYKColorspace; target_info.channels=4; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_CMYK_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_CMYK_16; else #endif { target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; target_info.scale=0.01; } break; } case cmsSigGrayData: { target_info.colorspace=GRAYColorspace; target_info.channels=1; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_16; else #endif target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; break; } case cmsSigLabData: { target_info.colorspace=LabColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_Lab_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_Lab_16; else #endif { target_info.type=(cmsUInt32Number) TYPE_Lab_DBL; target_info.scale=0.01; target_info.translate=0.5; } break; } case cmsSigRgbData: { target_info.colorspace=sRGBColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_8; else #elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_16; else #endif target_info.type=(cmsUInt32Number) TYPE_RGB_DBL; break; } case cmsSigXYZData: { target_info.colorspace=XYZColorspace; #if (MAGICKCORE_QUANTUM_DEPTH == 8) if (highres == MagickFalse) target_info.type=(cmsUInt32Number) TYPE_XYZ_8; else 
#elif (MAGICKCORE_QUANTUM_DEPTH == 16) if (highres == MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_16; else #endif target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: { target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC; break; } case PerceptualIntent: { target_info.intent=INTENT_PERCEPTUAL; break; } case RelativeIntent: { target_info.intent=INTENT_RELATIVE_COLORIMETRIC; break; } case SaturationIntent: { target_info.intent=INTENT_SATURATION; break; } default: { target_info.intent=INTENT_PERCEPTUAL; break; } } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(&source_info,&target_info, flags,cms_context); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. */ source_info.pixels=AcquirePixelThreadSet(image->columns, source_info.channels,highres); target_info.pixels=AcquirePixelThreadSet(image->columns, target_info.channels,highres); if ((source_info.pixels == (void **) NULL) || (target_info.pixels == (void **) NULL)) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if (source_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); return(MagickFalse); } if (target_info.colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_info.colorspace,exception); progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } if (highres != MagickFalse) TransformDoublePixels(id,image,&source_info,&target_info,transform,q); else TransformQuantumPixels(id,image,&source_info,&target_info,transform,q); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ProfileImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_info.colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait == UndefinedPixelTrait ? 
TrueColorType : TrueColorAlphaType; break; } case cmsSigCmykData: { image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; break; } case cmsSigGrayData: { image->type=image->alpha_trait == UndefinedPixelTrait ? GrayscaleType : GrayscaleAlphaType; break; } default: break; } target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if ((status != MagickFalse) && (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass)) status=SetImageProfile(image,name,profile,exception); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); } (void) cmsCloseProfile(source_info.profile); cmsDeleteContext(cms_context); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. % % The format of the RemoveImageProfile method is: % % void *RemoveImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. % */ MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name) { StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); WriteTo8BimProfile(image,name,(StringInfo *) NULL); profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t P r o f i l e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageProfileIterator() resets the image profile iterator. Use it in % conjunction with GetNextImageProfile() to iterate over all the profiles % associated with an image. % % The format of the ResetImageProfileIterator method is: % % ResetImageProfileIterator(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void ResetImageProfileIterator(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return; ResetSplayTreeIterator((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageProfile() adds a named profile to the image. If a profile with the % same name already exists, it is replaced. This method differs from the % ProfileImage() method in that it does not apply CMS color profiles. % % The format of the SetImageProfile method is: % % MagickBooleanType SetImageProfile(Image *image,const char *name, % const StringInfo *profile) % % A description of each parameter follows: % % o image: the image. 
% % o name: the profile name, for example icc, exif, and 8bim (8bim is the % Photoshop wrapper for iptc profiles). % % o profile: A StringInfo structure that contains the named profile. % */ static void *DestroyProfile(void *profile) { return((void *) DestroyStringInfo((StringInfo *) profile)); } static inline const unsigned char *ReadResourceByte(const unsigned char *p, unsigned char *quantum) { *quantum=(*p++); return(p); } static inline const unsigned char *ReadResourceLong(const unsigned char *p, unsigned int *quantum) { *quantum=(unsigned int) (*p++) << 24; *quantum|=(unsigned int) (*p++) << 16; *quantum|=(unsigned int) (*p++) << 8; *quantum|=(unsigned int) (*p++); return(p); } static inline const unsigned char *ReadResourceShort(const unsigned char *p, unsigned short *quantum) { *quantum=(unsigned short) (*p++) << 8; *quantum|=(unsigned short) (*p++); return(p); } static inline void WriteResourceLong(unsigned char *p, const unsigned int quantum) { unsigned char buffer[4]; buffer[0]=(unsigned char) (quantum >> 24); buffer[1]=(unsigned char) (quantum >> 16); buffer[2]=(unsigned char) (quantum >> 8); buffer[3]=(unsigned char) quantum; (void) memcpy(p,buffer,4); } static void WriteTo8BimProfile(Image *image,const char *name, const StringInfo *profile) { const unsigned char *datum, *q; register const unsigned char *p; size_t length; StringInfo *profile_8bim; ssize_t count; unsigned char length_byte; unsigned int value; unsigned short id, profile_id; if (LocaleCompare(name,"icc") == 0) profile_id=0x040f; else if (LocaleCompare(name,"iptc") == 0) profile_id=0x0404; else if (LocaleCompare(name,"xmp") == 0) profile_id=0x0424; else return; profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *) image->profiles,"8bim"); if (profile_8bim == (StringInfo *) NULL) return; datum=GetStringInfoDatum(profile_8bim); length=GetStringInfoLength(profile_8bim); for (p=datum; p < (datum+length-16); ) { q=p; if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((count & 0x01) != 0) count++; if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length)) break; if (id != profile_id) p+=count; else { size_t extent, offset; ssize_t extract_extent; StringInfo *extract_profile; extract_extent=0; extent=(datum+length)-(p+count); if (profile == (StringInfo *) NULL) { offset=(q-datum); extract_profile=AcquireStringInfo(offset+extent); (void) memcpy(extract_profile->datum,datum,offset); } else { offset=(p-datum); extract_extent=profile->length; if ((extract_extent & 0x01) != 0) extract_extent++; extract_profile=AcquireStringInfo(offset+extract_extent+extent); (void) memcpy(extract_profile->datum,datum,offset-4); WriteResourceLong(extract_profile->datum+offset-4,(unsigned int) profile->length); (void) memcpy(extract_profile->datum+offset, profile->datum,profile->length); } (void) memcpy(extract_profile->datum+offset+extract_extent, p+count,extent); (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString("8bim"),CloneStringInfo(extract_profile)); extract_profile=DestroyStringInfo(extract_profile); break; } } } static void GetProfilesFromResourceBlock(Image *image, const StringInfo *resource_block,ExceptionInfo *exception) { const unsigned char *datum; register const unsigned char *p; size_t length; ssize_t count; StringInfo *profile; unsigned char length_byte; 
unsigned int value; unsigned short id; datum=GetStringInfoDatum(resource_block); length=GetStringInfoLength(resource_block); for (p=datum; p < (datum+length-16); ) { if (LocaleNCompare((char *) p,"8BIM",4) != 0) break; p+=4; p=ReadResourceShort(p,&id); p=ReadResourceByte(p,&length_byte); p+=length_byte; if (((length_byte+1) & 0x01) != 0) p++; if (p > (datum+length-4)) break; p=ReadResourceLong(p,&value); count=(ssize_t) value; if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0)) break; switch (id) { case 0x03ed: { unsigned int resolution; unsigned short units; /* Resolution. */ if (count < 10) break; p=ReadResourceLong(p,&resolution); image->resolution.x=((double) resolution)/65536.0; p=ReadResourceShort(p,&units)+2; p=ReadResourceLong(p,&resolution)+4; image->resolution.y=((double) resolution)/65536.0; /* Values are always stored as pixels per inch. */ if ((ResolutionType) units != PixelsPerCentimeterResolution) image->units=PixelsPerInchResolution; else { image->units=PixelsPerCentimeterResolution; image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case 0x0404: { /* IPTC Profile */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x040c: { /* Thumbnail. */ p+=count; break; } case 0x040f: { /* ICC Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"icc",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0422: { /* EXIF Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"exif",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } case 0x0424: { /* XMP Profile. */ profile=AcquireStringInfo(count); SetStringInfoDatum(profile,p); (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue, exception); profile=DestroyStringInfo(profile); p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } } static void PatchCorruptProfile(const char *name,StringInfo *profile) { register unsigned char *p; size_t length; /* Detect corrupt profiles and if discovered, repair. */ if (LocaleCompare(name,"xmp") == 0) { /* Remove garbage after xpacket end. */ p=GetStringInfoDatum(profile); p=(unsigned char *) strstr((const char *) p,"<?xpacket end=\"w\"?>"); if (p != (unsigned char *) NULL) { p+=19; length=p-GetStringInfoDatum(profile); if (length != GetStringInfoLength(profile)) { *p='\0'; SetStringInfoLength(profile,length); } } return; } if (LocaleCompare(name,"exif") == 0) { /* Check if profile starts with byte order marker instead of Exif. */ p=GetStringInfoDatum(profile); if ((LocaleNCompare((const char *) p,"MM",2) == 0) || (LocaleNCompare((const char *) p,"II",2) == 0)) { const unsigned char profile_start[] = "Exif\0\0"; StringInfo *exif_profile; exif_profile=AcquireStringInfo(6); if (exif_profile != (StringInfo *) NULL) { SetStringInfoDatum(exif_profile,profile_start); ConcatenateStringInfo(exif_profile,profile); SetStringInfoLength(profile,GetStringInfoLength(exif_profile)); SetStringInfo(profile,exif_profile); exif_profile=DestroyStringInfo(exif_profile); } } } } #if defined(MAGICKCORE_XML_DELEGATE) static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { xmlDocPtr document; /* Parse XML profile. 
*/ document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int) GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR | XML_PARSE_NOWARNING); if (document == (xmlDocPtr) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "CorruptImageProfile","`%s' (XMP)",image->filename); return(MagickFalse); } xmlFreeDoc(document); return(MagickTrue); } #else static MagickBooleanType ValidateXMPProfile(Image *image, const StringInfo *profile,ExceptionInfo *exception) { (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError, "DelegateLibrarySupportNotBuiltIn","'%s' (XML)",image->filename); return(MagickFalse); } #endif static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MagickPathExtent]; MagickBooleanType status; StringInfo *clone_profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); clone_profile=CloneStringInfo(profile); PatchCorruptProfile(name,clone_profile); if ((LocaleCompare(name,"xmp") == 0) && (ValidateXMPProfile(image,clone_profile,exception) == MagickFalse)) { clone_profile=DestroyStringInfo(clone_profile); return(MagickTrue); } if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MagickPathExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),clone_profile); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,clone_profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,clone_profile); } return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline signed short ReadProfileShort(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned short value; if (endian == LSBEndian) { value=(unsigned short) buffer[1] << 8; value|=(unsigned short) buffer[0]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } value=(unsigned short) buffer[0] << 8; value|=(unsigned short) buffer[1]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } static inline signed int ReadProfileLong(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned int value; if (endian == LSBEndian) { value=(unsigned int) buffer[3] << 24; value|=(unsigned int) buffer[2] << 16; value|=(unsigned int) buffer[1] << 8; value|=(unsigned int) buffer[0]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } value=(unsigned int) buffer[0] << 24; value|=(unsigned int) buffer[1] << 16; value|=(unsigned int) buffer[2] << 8; value|=(unsigned int) buffer[3]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length) { signed int value; if (*length < 4) return(0); value=ReadProfileLong(MSBEndian,*p); (*length)-=4; *p+=4; return(value); } static inline signed short ReadProfileMSBShort(unsigned char **p, size_t *length) { signed short value; if (*length < 2) return(0); value=ReadProfileShort(MSBEndian,*p); (*length)-=2; *p+=2; return(value); } static inline void WriteProfileLong(const EndianType endian, const size_t value,unsigned char *p) { unsigned char buffer[4]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); buffer[2]=(unsigned char) (value >> 16); buffer[3]=(unsigned char) (value >> 24); (void) memcpy(p,buffer,4); return; } buffer[0]=(unsigned char) (value >> 24); buffer[1]=(unsigned char) (value >> 16); buffer[2]=(unsigned char) (value >> 8); buffer[3]=(unsigned char) value; (void) memcpy(p,buffer,4); } static void WriteProfileShort(const EndianType endian, const unsigned short value,unsigned char *p) { unsigned char buffer[2]; if (endian == LSBEndian) { buffer[0]=(unsigned char) value; buffer[1]=(unsigned char) (value >> 8); (void) memcpy(p,buffer,2); return; } buffer[0]=(unsigned char) (value >> 8); buffer[1]=(unsigned char) value; (void) memcpy(p,buffer,2); } static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile) { size_t length; ssize_t count; unsigned char *p; unsigned short id; length=GetStringInfoLength(profile); p=GetStringInfoDatum(profile); while (length != 0) { if (ReadProfileByte(&p,&length) != 0x38) continue; if (ReadProfileByte(&p,&length) != 0x42) continue; if (ReadProfileByte(&p,&length) != 0x49) continue; if (ReadProfileByte(&p,&length) != 0x4D) continue; if (length < 7) return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=(ssize_t) ReadProfileByte(&p,&length); if ((count >= (ssize_t) length) || (count < 0)) return(MagickFalse); p+=count; length-=count; if ((*p & 0x01) == 0) (void) ReadProfileByte(&p,&length); count=(ssize_t) ReadProfileMSBLong(&p,&length); if ((count > (ssize_t) length) || (count < 0)) return(MagickFalse); if ((id == 0x3ED) && (count == 16)) { if (image->units == PixelsPerCentimeterResolution) 
WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54* 65536.0),p); else WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x* 65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54* 65536.0),p+8); else WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y* 65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } p+=count; length-=count; } return(MagickTrue); } MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory, *exif; /* Set EXIF resolution tag. */ length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); } endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadProfileLong(endian,exif+4); if ((offset < 0) || ((size_t) offset >= length)) return(MagickFalse); directory=exif+offset; level=0; entry=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. */ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; register unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS)) break; components=(int) ReadProfileLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*format_bytes[format]; if ((ssize_t) number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { /* The directory entry contains an offset. 
*/ offset=(ssize_t) ReadProfileLong(endian,q+8); if ((offset < 0) || ((size_t) (offset+number_bytes) > length)) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) (image->units+1),p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { offset=(ssize_t) ReadProfileLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=directory; entry++; directory_stack[level].entry=entry; level++; directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; if ((directory+2+(12*number_entries)) > (exif+length)) break; offset=(ssize_t) ReadProfileLong(endian,directory+2+(12* number_entries)); if ((offset != 0) && ((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { directory_stack[level].directory=exif+offset; directory_stack[level].entry=0; level++; } } break; } } } while (level > 0); exif_resources=DestroySplayTree(exif_resources); return(MagickTrue); } MagickPrivate MagickBooleanType SyncImageProfiles(Image *image) { MagickBooleanType status; StringInfo *profile; status=MagickTrue; profile=(StringInfo *) GetImageProfile(image,"8BIM"); if (profile != (StringInfo *) NULL) if (Sync8BimProfile(image,profile) == MagickFalse) status=MagickFalse; profile=(StringInfo *) GetImageProfile(image,"EXIF"); if (profile != (StringInfo *) NULL) if (SyncExifProfile(image,profile) == MagickFalse) status=MagickFalse; return(status); } static void UpdateClipPath(unsigned char *blob,size_t length, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { register ssize_t i; ssize_t knot_count, selector; knot_count=0; while (length != 0) { selector=(ssize_t) ReadProfileMSBShort(&blob,&length); switch (selector) { case 0: case 3: { if (knot_count != 0) { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Expected subpath length record. */ knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length); blob+=22; length-=MagickMin(22,(ssize_t) length); break; } case 1: case 2: case 4: case 5: { if (knot_count == 0) { /* Unexpected subpath knot. 
*/ blob+=24; length-=MagickMin(24,(ssize_t) length); break; } /* Add sub-path knot */ for (i=0; i < 3; i++) { double x, y; signed int xx, yy; y=(double) ReadProfileMSBLong(&blob,&length); y=y*old_rows/4096/4096; y-=new_geometry->y; yy=(signed int) ((y*4096*4096)/new_geometry->height); WriteProfileLong(MSBEndian,(size_t) yy,blob-4); x=(double) ReadProfileMSBLong(&blob,&length); x=x*old_columns/4096/4096; x-=new_geometry->x; xx=(signed int) ((x*4096*4096)/new_geometry->width); WriteProfileLong(MSBEndian,(size_t) xx,blob-4); } knot_count--; break; } case 6: case 7: case 8: default: { blob+=24; length-=MagickMin(24,(ssize_t) length); break; } } } } MagickPrivate void Update8BIMClipPath(const StringInfo *profile, const size_t old_columns,const size_t old_rows, const RectangleInfo *new_geometry) { unsigned char *info; size_t length; ssize_t count, id; assert(profile != (StringInfo *) NULL); assert(new_geometry != (RectangleInfo *) NULL); length=GetStringInfoLength(profile); info=GetStringInfoDatum(profile); while (length > 0) { if (ReadProfileByte(&info,&length) != (unsigned char) '8') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'B') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'I') continue; if (ReadProfileByte(&info,&length) != (unsigned char) 'M') continue; id=(ssize_t) ReadProfileMSBShort(&info,&length); count=(ssize_t) ReadProfileByte(&info,&length); if ((count != 0) && ((size_t) count <= length)) { info+=count; length-=count; } if ((count & 0x01) == 0) (void) ReadProfileByte(&info,&length); count=(ssize_t) ReadProfileMSBLong(&info,&length); if ((count < 0) || ((size_t) count > length)) { length=0; continue; } if ((id > 1999) && (id < 2999)) UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry); info+=count; length-=MagickMin(count,(ssize_t) length); } }
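/*
  Illustrative usage sketch -- not part of profile.c above.  It shows how an
  application might attach an ICC profile with ProfileImage() and then walk
  the image's profiles with ResetImageProfileIterator()/GetNextImageProfile(),
  whose method formats are documented in the comments above.  The command-line
  handling, the fixed-size read buffer, and the file names are assumptions
  made only for this example.
*/
#include <stdio.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  const char
    *name;

  if (argc != 3)
    {
      (void) fprintf(stderr,"usage: %s image.png profile.icc\n",argv[0]);
      return(1);
    }
  MagickCoreGenesis(argv[0],MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,argv[1],MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      FILE
        *file;

      file=fopen(argv[2],"rb");
      if (file != (FILE *) NULL)
        {
          size_t
            length;

          unsigned char
            datum[65536];  /* ad hoc buffer; real code would size it to the file */

          length=fread(datum,1,sizeof(datum),file);
          (void) fclose(file);
          /*
            Convert the pixels to the profile's colorspace, or simply attach
            the profile when the image carries no source profile yet.
          */
          (void) ProfileImage(image,"icc",datum,length,exception);
        }
      ResetImageProfileIterator(image);
      for (name=GetNextImageProfile(image); name != (const char *) NULL; )
      {
        (void) fprintf(stdout,"%s: %lu bytes\n",name,(unsigned long)
          GetStringInfoLength(GetImageProfile(image,name)));
        name=GetNextImageProfile(image);
      }
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}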
rose_Stress-1.c
//#include <float.h> //#include <math.h> #define MIN(a, b) ( (a < b) ? a : b) #define MAX(a, b) ( (a > b) ? a : b) #include <omp.h> typedef double real8; void StressCheckEpsFail(real8 *newSxx,real8 *newSyy,real8 *newSzz,real8 *newTxy,real8 *newTxz,real8 *newTyz,real8 *eps,real8 eps_failure_model,const int *zoneset,int length) { int i; int index; #pragma omp parallel for private (index,i) firstprivate (eps_failure_model,length) for (i = 0; i <= length - 1; i += 1) { index = zoneset[i]; if (eps[zoneset[i]] > eps_failure_model) { newSxx[i] = 0.0; newSyy[i] = 0.0; newSzz[i] = 0.0; newTxy[i] = 0.0; newTxz[i] = 0.0; newTyz[i] = 0.0; eps[zoneset[i]] = eps_failure_model * 1.01; } } } void StressStrainWork(real8 *deltz,real8 *delts,const real8 *newSxx,const real8 *newSyy,const real8 *newSzz,const real8 *newTxy,const real8 *newTxz,const real8 *newTyz,const real8 *sxx,const real8 *syy,const real8 *txy,const real8 *txz,const real8 *tyz,const real8 *dxx,const real8 *dyy,const real8 *dzz,const real8 *dxy,const real8 *dxz,const real8 *dyz,real8 deltaTime,const int *zoneset,const real8 *vc,const real8 *vnewc,int length) { int i; int index; real8 quarterDelta = 0.25 * deltaTime; real8 szz; #pragma omp parallel for private (index,szz,i) firstprivate (length,quarterDelta) for (i = 0; i <= length - 1; i += 1) { index = zoneset[i]; szz = -sxx[zoneset[i]] - syy[zoneset[i]]; deltz[zoneset[i]] += quarterDelta * (vnewc[i] + vc[i]) * (dxx[zoneset[i]] * (sxx[zoneset[i]] + newSxx[i]) + dyy[zoneset[i]] * (syy[zoneset[i]] + newSyy[i]) + dzz[zoneset[i]] * (szz + newSzz[i]) + 2. * dxy[zoneset[i]] * (txy[zoneset[i]] + newTxy[i]) + 2. * dxz[zoneset[i]] * (txz[zoneset[i]] + newTxz[i]) + 2. * dyz[zoneset[i]] * (tyz[zoneset[i]] + newTyz[i])); delts[i] += quarterDelta * (vnewc[i] + vc[i]) * (dxx[zoneset[i]] * sxx[zoneset[i]] + dyy[zoneset[i]] * syy[zoneset[i]] + dzz[zoneset[i]] * szz + 2. * dxy[zoneset[i]] * txy[zoneset[i]] + 2. * dxz[zoneset[i]] * txz[zoneset[i]] + 2. * dyz[zoneset[i]] * tyz[zoneset[i]]); } } void StressStrainHeat(const real8 *deltz,real8 *deltzh,real8 *deltrh,const real8 *shearMod,const real8 *shearRatio,const real8 *shearDer,const real8 *newSxx,const real8 *newSyy,const real8 *newSzz,const real8 *newTxy,const real8 *newTxz,const real8 *newTyz,const real8 *sxx,const real8 *syy,const real8 *txy,const real8 *txz,const real8 *tyz,real8 deltaTime,const int *zoneset,const real8 *vc,const real8 *vnewc,int length) { real8 shearr; real8 sheari; real8 avgMod; int nz; int i; deltaTime = deltaTime; /* Quiet the compiler - unused argument */ #pragma omp parallel for private (shearr,sheari,avgMod,nz,i) firstprivate (length) for (i = 0; i <= length - 1; i += 1) { nz = zoneset[i]; shearr = 0.5 * shearRatio[i]; if (shearMod[zoneset[i]] > 0.) { sheari = 0.5 / shearMod[zoneset[i]]; deltrh[zoneset[i]] = .25 * (vnewc[i] + vc[i]) * ((newSxx[i] * sheari - sxx[zoneset[i]] * shearr) * (sxx[zoneset[i]] + newSxx[i]) + (newSyy[i] * sheari - syy[zoneset[i]] * shearr) * (syy[zoneset[i]] + newSyy[i]) + (newSzz[i] * sheari + (syy[zoneset[i]] + sxx[zoneset[i]]) * shearr) * (newSzz[i] - sxx[zoneset[i]] - syy[zoneset[i]]) + 2. * (newTxy[i] * sheari - txy[zoneset[i]] * shearr) * (txy[zoneset[i]] + newTxy[i]) + 2. * (newTxz[i] * sheari - txz[zoneset[i]] * shearr) * (txz[zoneset[i]] + newTxz[i]) + 2. 
* (newTyz[i] * sheari - tyz[zoneset[i]] * shearr) * (tyz[zoneset[i]] + newTyz[i])); } else { deltrh[zoneset[i]] = - .25 * (vnewc[i] + vc[i]) * (sxx[zoneset[i]] * (sxx[zoneset[i]] + newSxx[i]) + syy[zoneset[i]] * (syy[zoneset[i]] + newSyy[i]) - (syy[zoneset[i]] + sxx[zoneset[i]]) * (newSzz[i] - sxx[zoneset[i]] - syy[zoneset[i]]) + 2. * txy[zoneset[i]] * (txy[zoneset[i]] + newTxy[i]) + 2. * txz[zoneset[i]] * (txz[zoneset[i]] + newTxz[i]) + 2. * tyz[zoneset[i]] * (tyz[zoneset[i]] + newTyz[i])) * shearr; } deltzh[zoneset[i]] = deltz[zoneset[i]] - deltrh[zoneset[i]]; avgMod = 0.5 * shearMod[zoneset[i]]; if (shearRatio[i] > 0.0) avgMod = avgMod + 0.5 / shearRatio[i]; if (avgMod > 0.0) deltrh[zoneset[i]] = shearDer[i] * deltrh[zoneset[i]] / avgMod; else deltrh[zoneset[i]] = 0.0; } }
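/*
 * Hypothetical driver for StressCheckEpsFail(), assuming it is compiled in
 * the same translation unit as the kernels above (real8 and the function
 * definitions come from that file).  The array sizes, the zoneset contents,
 * and the failure threshold are made-up values that only illustrate the
 * gather pattern: i walks the compressed zone list, zoneset[i] indexes the
 * mesh-sized arrays.
 */
#include <stdio.h>

#define NZONES 8   /* mesh-sized arrays, indexed through zoneset[i] */
#define NLIST  4   /* compressed zone list, indexed directly by i   */

int main(void)
{
  real8 sxx[NLIST], syy[NLIST], szz[NLIST], txy[NLIST], txz[NLIST], tyz[NLIST];
  real8 eps[NZONES];
  int zoneset[NLIST] = {1, 3, 4, 6};   /* zones owned by this material */
  int i;

  for (i = 0; i < NLIST; i++)
    sxx[i] = syy[i] = szz[i] = txy[i] = txz[i] = tyz[i] = 1.0;
  for (i = 0; i < NZONES; i++)
    eps[i] = 0.5;
  eps[4] = 2.0;   /* one zone is pushed past the failure strain */

  /* Zero the new stress components of every list entry whose zone has
     eps[zoneset[i]] above the failure threshold (1.0 here). */
  StressCheckEpsFail(sxx, syy, szz, txy, txz, tyz, eps, 1.0, zoneset, NLIST);

  for (i = 0; i < NLIST; i++)
    printf("list entry %d (zone %d): sxx=%g eps=%g\n",
           i, zoneset[i], sxx[i], eps[zoneset[i]]);
  return 0;
}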
MatrixFreeSolver.h
#ifndef __MatrixFreeSolver_H__ #define __MatrixFreeSolver_H__ #include <Eigen/Core> #include <Eigen/Dense> #include <Eigen/Sparse> using SystemMatrixType = Eigen::SparseMatrix<Real>; namespace SPH { class MatrixReplacement; } namespace Eigen { namespace internal { template<> struct traits<SPH::MatrixReplacement> : public Eigen::internal::traits<SystemMatrixType> {}; } } namespace SPH { /** Replacement of the matrix in the linear system which is required for a * matrix-free solver. */ class MatrixReplacement : public Eigen::EigenBase<MatrixReplacement> { public: // Required typedefs, constants, and method: typedef Real Scalar; typedef Real RealScalar; typedef int StorageIndex; typedef void(*MatrixVecProdFct) (const Real*, Real*, void *); enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic, IsRowMajor = false }; Index rows() const { return m_dim; } Index cols() const { return m_dim; } template<typename Rhs> Eigen::Product<MatrixReplacement, Rhs, Eigen::AliasFreeProduct> operator*(const Eigen::MatrixBase<Rhs>& x) const { return Eigen::Product<MatrixReplacement, Rhs, Eigen::AliasFreeProduct>(*this, x.derived()); } MatrixReplacement(const unsigned int dim, MatrixVecProdFct fct, void *userData) : m_dim(dim), m_matrixVecProdFct(fct), m_userData(userData) {} void * getUserData() { return m_userData; } MatrixVecProdFct getMatrixVecProdFct() { return m_matrixVecProdFct; } protected: unsigned int m_dim; void *m_userData; /** matrix vector product callback */ MatrixVecProdFct m_matrixVecProdFct; }; /** Matrix-free Jacobi preconditioner */ class JacobiPreconditioner1D { public: typedef typename SystemMatrixType::StorageIndex StorageIndex; typedef void(*DiagonalMatrixElementFct) (const unsigned int, Real&, void *); enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic }; JacobiPreconditioner1D() {} void init(const unsigned int dim, DiagonalMatrixElementFct fct, void *userData) { m_dim = dim; m_diagonalElementFct = fct; m_userData = userData; } Eigen::Index rows() const { return m_dim; } Eigen::Index cols() const { return m_dim; } Eigen::ComputationInfo info() { return Eigen::Success; } template<typename MatType> JacobiPreconditioner1D& analyzePattern(const MatType&) { return *this; } template<typename MatType> JacobiPreconditioner1D& factorize(const MatType& mat) { return *this; } template<typename MatType> JacobiPreconditioner1D& compute(const MatType& mat) { m_invDiag.resize(m_dim); #pragma omp parallel default(shared) { #pragma omp for schedule(static) for (int i = 0; i < (int)m_dim; i++) { Real res; m_diagonalElementFct(i, res, m_userData); m_invDiag[i] = 1.0 / res; } } return *this; } template<typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { x = m_invDiag.array() * b.array(); } template<typename Rhs> inline const Eigen::Solve<JacobiPreconditioner1D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const { return Eigen::Solve<JacobiPreconditioner1D, Rhs>(*this, b.derived()); } protected: unsigned int m_dim; /** diagonal matrix element callback */ DiagonalMatrixElementFct m_diagonalElementFct; void *m_userData; Eigen::VectorXd m_invDiag; }; /** Matrix-free Jacobi preconditioner */ class JacobiPreconditioner3D { public: typedef typename SystemMatrixType::StorageIndex StorageIndex; typedef void(*DiagonalMatrixElementFct) (const unsigned int, Vector3r&, void *); enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic }; JacobiPreconditioner3D() {} void init(const unsigned int dim, 
DiagonalMatrixElementFct fct, void *userData) { m_dim = dim; m_diagonalElementFct = fct; m_userData = userData; } Eigen::Index rows() const { return 3*m_dim; } Eigen::Index cols() const { return 3*m_dim; } Eigen::ComputationInfo info() { return Eigen::Success; } template<typename MatType> JacobiPreconditioner3D& analyzePattern(const MatType&) { return *this; } template<typename MatType> JacobiPreconditioner3D& factorize(const MatType& mat) { return *this; } template<typename MatType> JacobiPreconditioner3D& compute(const MatType& mat) { m_invDiag.resize(m_dim*3); #pragma omp parallel default(shared) { #pragma omp for schedule(static) for (int i = 0; i < (int)m_dim; i++) { Vector3r res; m_diagonalElementFct(i, res, m_userData); m_invDiag[3*i] = 1.0 / res[0]; m_invDiag[3*i+1] = 1.0 / res[1]; m_invDiag[3*i+2] = 1.0 / res[2]; } } return *this; } template<typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { x = m_invDiag.array() * b.array(); } template<typename Rhs> inline const Eigen::Solve<JacobiPreconditioner3D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const { return Eigen::Solve<JacobiPreconditioner3D, Rhs>(*this, b.derived()); } protected: unsigned int m_dim; /** diagonal matrix element callback */ DiagonalMatrixElementFct m_diagonalElementFct; void *m_userData; Eigen::VectorXd m_invDiag; }; /** Matrix-free 3x3 block Jacobi preconditioner */ class BlockJacobiPreconditioner3D { public: typedef typename SystemMatrixType::StorageIndex StorageIndex; typedef void(*DiagonalMatrixElementFct) (const unsigned int, Matrix3r&, void *); enum { ColsAtCompileTime = Eigen::Dynamic, MaxColsAtCompileTime = Eigen::Dynamic }; BlockJacobiPreconditioner3D() {} void init(const unsigned int dim, DiagonalMatrixElementFct fct, void *userData) { m_dim = dim; m_diagonalElementFct = fct; m_userData = userData; } Eigen::Index rows() const { return 3 * m_dim; } Eigen::Index cols() const { return 3 * m_dim; } Eigen::ComputationInfo info() { return Eigen::Success; } template<typename MatType> BlockJacobiPreconditioner3D& analyzePattern(const MatType&) { return *this; } template<typename MatType> BlockJacobiPreconditioner3D& factorize(const MatType& mat) { return *this; } template<typename MatType> BlockJacobiPreconditioner3D& compute(const MatType& mat) { m_invDiag.resize(m_dim); #pragma omp parallel default(shared) { #pragma omp for schedule(static) for (int i = 0; i < (int)m_dim; i++) { Matrix3r res; m_diagonalElementFct(i, res, m_userData); m_invDiag[i] = res.inverse(); } } return *this; } template<typename Rhs, typename Dest> void _solve_impl(const Rhs& b, Dest& x) const { #pragma omp parallel default(shared) { #pragma omp for schedule(static) for (int i = 0; i < (int)m_dim; i++) { static_cast<VectorXr&>(x).block<3, 1>(3 * i, 0) = m_invDiag[i] * static_cast<const VectorXr&>(b).block<3, 1>(3 * i, 0); } } } template<typename Rhs> inline const Eigen::Solve<BlockJacobiPreconditioner3D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const { return Eigen::Solve<BlockJacobiPreconditioner3D, Rhs>(*this, b.derived()); } protected: unsigned int m_dim; /** diagonal matrix element callback */ DiagonalMatrixElementFct m_diagonalElementFct; void *m_userData; std::vector<Matrix3r> m_invDiag; }; } namespace Eigen { namespace internal { using namespace SPH; /** Implementation of the matrix-free matrix vector product */ template<typename Rhs> struct generic_product_impl<MatrixReplacement, Rhs, SparseShape, DenseShape, GemvProduct> // GEMV stands for generic matrix-vector : 
generic_product_impl_base<MatrixReplacement, Rhs, generic_product_impl<MatrixReplacement, Rhs> > { typedef typename Product<MatrixReplacement, Rhs>::Scalar Scalar; template<typename Dest> static void scaleAndAddTo(Dest& dst, const MatrixReplacement& lhs, const Rhs& rhs, const Scalar& alpha) { // This method should implement "dst += alpha * lhs * rhs" inplace, // however, for iterative solvers, alpha is always equal to 1, so let's not bother about it. assert(alpha == Scalar(1) && "scaling is not implemented"); const Real *vec = &rhs(0); Real *res = &dst(0); MatrixReplacement& lhs_ = const_cast<MatrixReplacement&>(lhs); lhs_.getMatrixVecProdFct()(vec, res, lhs_.getUserData()); } }; } } #endif
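// Usage sketch (illustrative, not part of MatrixFreeSolver.h): how the
// MatrixReplacement and JacobiPreconditioner1D defined above can be plugged
// into one of Eigen's iterative solvers.  The identity-matrix callbacks below
// are placeholders for whatever operator the simulation actually evaluates;
// VectorXr is the dynamic Real vector type already used elsewhere in this
// codebase.
#include <Eigen/IterativeLinearSolvers>

namespace SPH
{
	// y = A*x evaluated matrix-free; here A is simply the identity.
	inline void exampleMatVec(const Real *x, Real *y, void *userData)
	{
		const unsigned int dim = *static_cast<unsigned int*>(userData);
		for (unsigned int i = 0; i < dim; i++)
			y[i] = x[i];
	}

	// res = A(i,i); for the identity every diagonal entry is 1.
	inline void exampleDiagonal(const unsigned int, Real &res, void *)
	{
		res = static_cast<Real>(1.0);
	}

	inline VectorXr solveExample(unsigned int dim, const VectorXr &rhs)
	{
		MatrixReplacement A(dim, exampleMatVec, &dim);

		Eigen::ConjugateGradient<MatrixReplacement, Eigen::Lower | Eigen::Upper,
			JacobiPreconditioner1D> cg;
		cg.preconditioner().init(dim, exampleDiagonal, nullptr); // must precede compute()
		cg.setTolerance(static_cast<Real>(1.0e-6));
		cg.compute(A);        // also builds the inverse diagonal via the preconditioner
		return cg.solve(rhs); // each CG iteration calls exampleMatVec through operator*
	}
}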
single.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { int x = 0; #pragma omp parallel num_threads(2) { #pragma omp single { x++; } } printf("x=%d\n", x); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_single_in_block_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], parent_task_id=[[TASK_ID:[0-9]+]], workshare_function={{0x[0-f]+}}, count=1 // CHECK: {{^}}[[THREAD_ID]]: ompt_event_single_in_block_end: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], workshare_function={{0x[0-f]+}}, count=1 // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_single_others_begin: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[TASK_ID:[0-9]+]], workshare_function={{0x[0-f]+}}, count=1 // CHECK: {{^}}[[THREAD_ID]]: ompt_event_single_others_end: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], workshare_function={{0x[0-f]+}}, count=1 return 0; }
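// Illustrative sketch (not the callback.h used by this test): the shape of an
// ompt_callback_work handler, assuming the OpenMP 5.0 OMPT interface from
// <omp-tools.h>.  The thread that wins the single construct reports
// ompt_work_single_executor, the other threads report ompt_work_single_other;
// callback.h registers such a handler through ompt_set_callback() and prints
// the ompt_event_single_* lines matched by the CHECK directives above.
#include <stdio.h>
#include <omp-tools.h>

static void on_ompt_work(ompt_work_t wstype, ompt_scope_endpoint_t endpoint,
                         ompt_data_t *parallel_data, ompt_data_t *task_data,
                         uint64_t count, const void *codeptr_ra) {
  if (wstype == ompt_work_single_executor || wstype == ompt_work_single_other)
    printf("single %s: executor=%d count=%llu\n",
           endpoint == ompt_scope_begin ? "begin" : "end",
           wstype == ompt_work_single_executor, (unsigned long long)count);
}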
bcnn_utils.h
/* * Copyright (c) 2016-present Jean-Noel Braun. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef BCNN_UTILS_H #define BCNN_UTILS_H #include "bcnn/bcnn.h" /* OpenMP */ #ifdef BCNN_USE_OPENMP #include <omp.h> #endif /* Cuda include */ #ifdef BCNN_USE_CUDA #include <cublas_v2.h> #include <cuda.h> #include <cuda_runtime.h> #include <curand.h> #include <driver_types.h> // cuda driver types #ifdef BCNN_USE_CUDNN #include <cudnn.h> #endif #endif #include <stdio.h> #include <stdlib.h> // RAND_MAX #include <time.h> #ifdef __cplusplus extern "C" { #endif typedef struct { int state; float r; } bcnn_gauss_gen; typedef struct { bcnn_log_callback fct; bcnn_log_level lvl; } bcnn_log_context; /** Convenient macros */ #define BCNN_CHECK(exp, err) \ do { \ if (!(exp)) { \ return (err); \ } \ } while (0) #define BCNN_CHECK_AND_LOG(ctx, exp, err, fmt, ...) \ do { \ if (!(exp)) { \ bcnn_log((ctx), BCNN_LOG_ERROR, (fmt), ##__VA_ARGS__); \ return (err); \ } \ } while (0) #define BCNN_CHECK_STATUS(s) \ do { \ bcnn_status ret = (s); \ if ((ret) != BCNN_SUCCESS) { \ return (ret); \ } \ } while (0) #define BCNN_ERROR(ctx, err, fmt, ...) \ do { \ bcnn_log((ctx), BCNN_LOG_ERROR, (fmt), ##__VA_ARGS__); \ return (err); \ } while (0) #define BCNN_INFO(ctx, fmt, ...) \ do { \ bcnn_log((ctx), BCNN_LOG_INFO, (fmt), ##__VA_ARGS__); \ } while (0) #define BCNN_WARNING(ctx, fmt, ...) 
\ do { \ bcnn_log((ctx), BCNN_LOG_WARNING, (fmt), ##__VA_ARGS__); \ } while (0) #define BCNN_PARSE_CLEANUP(l, t, n) \ do { \ for (int i = 0; i < (n); ++i) { \ bh_free((t[i])); \ } \ bh_free(t); \ bh_free(l); \ } while (0) float bcnn_rng_gaussian(bcnn_gauss_gen *g); void bcnn_log(bcnn_log_context ctx, bcnn_log_level level, const char *fmt, ...); void bcnn_draw_color_box(unsigned char *img, int w_img, int h_img, float cx_box, float cy_box, float w_box, float h_box, unsigned char color[3]); static inline int bcnn_rand_between(int min, int max) { if (min > max) { return 0.f; } return (int)(((float)rand() / RAND_MAX * (max - min)) + min + 0.5f); } static inline int bcnn_omp_get_num_threads() { #ifdef BCNN_USE_OPENMP int n = 0; #pragma omp parallel reduction(+ : n) n += 1; return n; #else return 1; #endif } static inline const char *bcnn_act2str(bcnn_activation a) { switch (a) { case BCNN_ACT_TANH: return "Tanh"; case BCNN_ACT_RELU: return "ReLU"; case BCNN_ACT_RAMP: return "Ramp"; case BCNN_ACT_SOFTPLUS: return "Softplus"; case BCNN_ACT_LRELU: return "Leaky-ReLU"; case BCNN_ACT_ABS: return "AbsVal"; case BCNN_ACT_CLAMP: return "Clamp"; case BCNN_ACT_PRELU: return "PReLU"; default: return "None"; } } #ifdef BCNN_USE_CUDA #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200) #define BCNN_CUDA_THREADS 1024 #else #define BCNN_CUDA_THREADS 512 #endif static inline int bcnn_cuda_blocks(const int n) { return (n - 1) / (BCNN_CUDA_THREADS) + 1; } cublasHandle_t bcnn_cublas_handle(); #define bcnn_cuda_check(RET) \ { \ if ((RET) != cudaSuccess) { \ fprintf(stderr, "[ERROR] [CUDA] %s\n", cudaGetErrorString((RET))); \ exit((RET)); \ } \ } #define bcnn_cublas_check(RET) \ { \ if ((RET) != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "[ERROR] [CUBLAS] %d\n", (int)(RET)); \ exit((RET)); \ } \ } #define bcnn_curand_check(RET) \ { \ if ((RET) != CURAND_STATUS_SUCCESS) { \ fprintf(stderr, "[ERROR] [CURAND] %d\n", (int)(RET)); \ exit((RET)); \ } \ } int *bcnn_cuda_malloc_i32(int n); float *bcnn_cuda_malloc_f32(int n); float *bcnn_cuda_memcpy_f32(float *x, int n); void bcnn_cuda_memcpy_f32_noalloc(float *x, float *x_gpu, int n); void bcnn_cuda_fill_with_random(float *x_gpu, int n); void bcnn_cuda_free(void *x_gpu); void bcnn_cuda_memcpy_host2dev(float *x_gpu, float *x, int n); void bcnn_cuda_memcpy_dev2host(float *x_gpu, float *x, int n); void bcnn_cuda_set_device(int id); #ifdef BCNN_USE_CUDNN #define bcnn_cudnn_check(RET) \ { \ if ((RET) != CUDNN_STATUS_SUCCESS) { \ fprintf(stderr, "[ERROR] [CUDNN] %s\n", \ cudnnGetErrorString((RET))); \ exit((RET)); \ } \ } cudnnHandle_t bcnn_cudnn_handle(); #endif // BCNN_USE_CUDNN #endif // BCNN_USE_CUDA #ifdef __cplusplus } #endif #endif // BCNN_UTILS_H
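/* Usage sketch (illustrative, not part of bcnn_utils.h): how the error-check
 * macros above are meant to compose inside a bcnn function.  The function
 * name bcnn_example_set_input and the BCNN_INVALID_PARAMETER status value are
 * assumptions; only the macros, bcnn_status and bcnn_log_context come from
 * the headers above. */
static inline bcnn_status bcnn_example_set_input(bcnn_log_context ctx, int w,
                                                 int h) {
    /* Log an error and return early when the precondition fails. */
    BCNN_CHECK_AND_LOG(ctx, w > 0 && h > 0, BCNN_INVALID_PARAMETER,
                       "invalid input size %dx%d", w, h);
    BCNN_INFO(ctx, "input size set to %dx%d", w, h);
    return BCNN_SUCCESS;
}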
convolution_1x1_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_packn_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt); } static void conv1x1s2_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * packn; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const __fp16* r0 = bottom_blob.channel(p); __fp16* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { vfloat16m1_t _val = vle16_v_f16m1(r0, vl); vse16_v_f16m1(outptr, _val, vl); r0 += packn * 2; outptr += packn; } r0 += tailstep; } } conv1x1s1_sgemm_packn_fp16sa_rvv(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
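// Illustrative scalar analogue (not part of ncnn): what the packn shrink loop
// in conv1x1s2_packn_fp16sa_rvv above does, with the RVV packing stripped
// away.  A 1x1 kernel with stride 2 only ever reads every second pixel in x
// and y, so the input is first compacted into an outw x outh blob and then
// handed to the stride-1 sgemm path.
static void shrink_stride2_scalar(const float* in, int w, float* out, int outw, int outh)
{
    for (int i = 0; i < outh; i++)
    {
        const float* r0 = in + (2 * i) * w; // jump two input rows per output row
        for (int j = 0; j < outw; j++)
            out[i * outw + j] = r0[2 * j];  // take every second column
    }
}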
MCarloX.c
/*
    Project:    mpi_monte_carlo
    File:       mpi_monte_carlo.cpp
    Created By: James Williams
    Date:       11/14/15.
    Purpose:    Estimate Pi by simulation of throwing darts at a dart board.
                Program will use MPI for distributed computing across nodes.
                1. Process 0 should read in the total number of tosses and
                   broadcast it to the other processes.
                2. Use MPI Reduce to find the global sum of the local variable
                   number in circle, and have process 0 print the result.

    Note:       num_darts was somehow being altered mid program, so the
                variable is reset to the original number of darts to be thrown
                right before process 0 calculates pi.  (The likely cause was
                the old rand_r(&seed + num_nodes) call: &seed + num_nodes
                points past the single seed variable, so rand_r scribbled over
                neighbouring stack memory.  The seeding is now one private
                seed per OpenMP thread.)

    Compile:    mpicc MCarloX.c -fopenmp -o MCarloX
    Usage:      mpiexec -n <# nodes or processors> MCarloX
*/

/* prototype */
double calc_pi(long long, long long);
/* prototype */

/* include */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include <omp.h>
/* include */

int main(int argc, char *argv[])
{
    long long i, local_darts, local_hits, total_hits, num_darts = 16000000;
    double pi, x, y;
    int my_rank, num_nodes;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &num_nodes);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    // if (argc <= 1) {
    //     MPI_Finalize();
    //     exit(-1);
    // }
    // scanf(argv[1],"%d", num_nodes); // 653331

    local_darts = num_darts / num_nodes;

    // if (my_rank == 0) {
    //     printf("num_nodes: %d\n", num_nodes);
    //     printf("num_darts: %lld\n", num_darts);
    //     printf("my rank: %d\tlocal_darts: %lld\n", my_rank, local_darts);
    // } else {
    //     printf("my rank: %d\tlocal_darts: %lld\n", my_rank, local_darts);
    // }

    if (my_rank == 0) {
        /* Process 0 takes the remainder so the per-process counts still add
           up to num_darts when num_darts is not divisible by num_nodes. */
        local_darts = (num_darts - (num_nodes - 1) * local_darts);
        /***************************************************
         * I do not understand this calculation.  You have
         * already correctly calculated local darts.
         */
    }

    total_hits = 0;
    local_hits = 0;

    #pragma omp parallel reduction(+: local_hits) private(x, y)
    {
        /* rand_r() needs one seed per OpenMP thread; sharing a single seed
           across threads would be a data race.  Derive it from the wall
           clock, the MPI rank and the thread id so every thread draws a
           different sequence. */
        unsigned int seed = (unsigned int)time(NULL)
                            + 1000u * (unsigned int)my_rank
                            + (unsigned int)omp_get_thread_num();
        #pragma omp for
        for (i = 0; i < local_darts; i++) {
            x = (double)rand_r(&seed) / RAND_MAX;   /* uniform in [0, 1] */
            y = (double)rand_r(&seed) / RAND_MAX;
            if ((x * x) + (y * y) <= 1) {
                ++local_hits;
            }
        }
    }

    printf("my rank: %d --> local_hits: %lld\n", my_rank, local_hits);

    MPI_Reduce(&local_hits, &total_hits, 1, MPI_LONG_LONG, MPI_SUM, 0,
               MPI_COMM_WORLD);

    /* see note at top */
    num_darts = 16000000;

    if (my_rank == 0) {
        pi = calc_pi(num_darts, total_hits);
        printf("\n\nEvaluation of Pi\nTotal darts = %lld\t", num_darts);
        printf("Total Hits = %lld\t", total_hits);
        printf("Pi = %f\n", pi);
    }

    MPI_Finalize();
    return 0;
}

double calc_pi(long long n, long long m)
{
    double my_pi = (4.0 * (double)m / (double)n);
    return my_pi;
} /* calc_pi */

/********************************************
 * 50/50  Please see my comment in your code.
 */
master.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(int argc, char **argv)
{
    /* n is capped at 20 so that a[n], sized with the initial n, stays large enough */
    int i, n = 20, tid, a[n], suma = 0, sumalocal;

    if (argc < 2) {
        fprintf(stderr, "\nMissing number of iterations\n");
        exit(-1);
    }
    n = atoi(argv[1]);
    if (n > 20) n = 20;
    for (i = 0; i < n; i++) a[i] = i;

    #pragma omp parallel private(sumalocal, tid)
    {
        sumalocal = 0;
        tid = omp_get_thread_num();
        #pragma omp for schedule(static)
        for (i = 0; i < n; i++) {
            sumalocal += a[i];
            printf(" thread %d sum of a[%d]=%d sumalocal=%d \n",
                   tid, i, a[i], sumalocal);
        }
        #pragma omp atomic
        suma += sumalocal;
        #pragma omp barrier
        #pragma omp master
        printf("master thread=%d prints suma=%d\n", tid, suma);
    }
    return 0;
}
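/* Example run (illustrative):
 *
 *   gcc -fopenmp master.c -o master
 *   OMP_NUM_THREADS=2 ./master 8
 *
 * Each thread prints its running sumalocal for the iterations it was given,
 * and the master thread finally prints suma = 0+1+...+7 = 28.  The barrier
 * before the master construct guarantees that every thread has already added
 * its sumalocal into suma via the atomic update.
 */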
openmp_wrapper.h
/*! * Copyright (c) 2017 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_OPENMP_WRAPPER_H_ #define LIGHTGBM_OPENMP_WRAPPER_H_ #ifdef _OPENMP #include <LightGBM/utils/log.h> #include <omp.h> #include <exception> #include <memory> #include <mutex> #include <stdexcept> #include <vector> inline int OMP_NUM_THREADS() { int ret = 1; #pragma omp parallel #pragma omp master { ret = omp_get_num_threads(); } return ret; } class ThreadExceptionHelper { public: ThreadExceptionHelper() { ex_ptr_ = nullptr; } ~ThreadExceptionHelper() { ReThrow(); } void ReThrow() { if (ex_ptr_ != nullptr) { std::rethrow_exception(ex_ptr_); } } void CaptureException() { // only catch first exception. if (ex_ptr_ != nullptr) { return; } std::unique_lock<std::mutex> guard(lock_); if (ex_ptr_ != nullptr) { return; } ex_ptr_ = std::current_exception(); } private: std::exception_ptr ex_ptr_; std::mutex lock_; }; #define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper #define OMP_LOOP_EX_BEGIN() try { #define OMP_LOOP_EX_END() \ } \ catch (std::exception & ex) { \ Log::Warning(ex.what()); \ omp_except_helper.CaptureException(); \ } \ catch (...) { \ omp_except_helper.CaptureException(); \ } #define OMP_THROW_EX() omp_except_helper.ReThrow() #else /* * To be compatible with OpenMP, define a nothrow macro which is used by gcc * openmp, but not by clang. * See also https://github.com/dmlc/dmlc-core/blob/3106c1cbdcc9fc9ef3a2c1d2196a7a6f6616c13d/include/dmlc/omp.h#L14 */ #if defined(__clang__) #undef __GOMP_NOTHROW #define __GOMP_NOTHROW #elif defined(__cplusplus) #undef __GOMP_NOTHROW #define __GOMP_NOTHROW throw() #else #undef __GOMP_NOTHROW #define __GOMP_NOTHROW __attribute__((__nothrow__)) #endif #ifdef _MSC_VER #pragma warning(disable : 4068) // disable unknown pragma warning #endif #ifdef __cplusplus extern "C" { #endif /** Fall here if no OPENMP support, so just simulate a single thread running. All #pragma omp should be ignored by the compiler **/ inline void omp_set_num_threads(int) __GOMP_NOTHROW {} // NOLINT (no cast done here) inline int omp_get_num_threads() __GOMP_NOTHROW {return 1;} inline int omp_get_max_threads() __GOMP_NOTHROW {return 1;} inline int omp_get_thread_num() __GOMP_NOTHROW {return 0;} inline int OMP_NUM_THREADS() __GOMP_NOTHROW { return 1; } #ifdef __cplusplus } // extern "C" #endif #define OMP_INIT_EX() #define OMP_LOOP_EX_BEGIN() #define OMP_LOOP_EX_END() #define OMP_THROW_EX() #endif #endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
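// Usage sketch (illustrative): how the OMP_* exception macros above are meant
// to wrap an OpenMP loop body, so that a C++ exception thrown by one
// iteration is captured and rethrown on the calling thread after the parallel
// region instead of escaping a worker thread and aborting the process.
// DoWork is a hypothetical callback that may throw.
#include <vector>

inline void ProcessAll(const std::vector<int>& items, void (*DoWork)(int)) {
  OMP_INIT_EX();                  // ThreadExceptionHelper on the stack
  #pragma omp parallel for
  for (int i = 0; i < static_cast<int>(items.size()); ++i) {
    OMP_LOOP_EX_BEGIN();          // opens a try block
    DoWork(items[i]);
    OMP_LOOP_EX_END();            // logs std::exception and records the first one
  }
  OMP_THROW_EX();                 // rethrows the recorded exception, if any
}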
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. 
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. 
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. 
Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. 
This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. 
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. 
We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anwyay. 
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. 
/// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. 
bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. 
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); // C++AMP declarator diagnostic functions bool DiagnoseCXXAMPDecl(Decl* Dcl, bool CheckContainer = false, bool IsInfer = false); bool IsCXXAMPTileStatic(Declarator &D); void DiagnosticCXXAMPTileStatic(Declarator &D, Decl *Dcl); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult 
ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); // C++AMP restriction specifier inferring routine void TryCXXAMPRestrictionInferring(Decl *D, Stmt *Body); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. 
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation.
Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl 
*TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. 
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. 
AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; // C++AMP diagnostic routine on destructor overload resolution void DiagnoseCXXAMPDtorOverload(FunctionDecl *New, const LookupResult &Old); OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. 
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. 
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType 
*Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // C++AMP restriction specifier scope checking routines bool IsInAMPRestricted(); // Determine if in CPU and/or AMP restricted codes bool IsInAnyExplicitRestricted(); void GetCXXAMPParentRestriction(Scope* SC, bool& ParentCPU, bool& ParentAMP, bool&ParentAUTO); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); // C++AMP diagnostic routine on overloaded call expressions void DiagnoseCXXAMPOverloadedCallExpr(SourceLocation LParenLoc, FunctionDecl* Callee); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. 
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. 
LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
  LOLR_StringTemplate
  };

  SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                  CXXSpecialMember SM,
                                                  bool ConstArg,
                                                  bool VolatileArg,
                                                  bool RValueThis,
                                                  bool ConstThis,
                                                  bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

  // C++AMP type checking routine for kernel codes
public:
  bool IsIncompatibleType(const Type* Ty, bool CheckContainer = false,
                          bool IsInfer = false);

private:
  // C++AMP type checking routine for kernel codes
  bool IsCXXAMPUnsupportedPointerType(const Type* Ty, bool CheckContainer = false,
                                      bool IsInfer = false);
  bool IsCXXAMPUnsupportedReferenceType(const Type* Ty, bool CheckContainer = false,
                                        bool IsInfer = false);

  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState &&other) noexcept;
    TypoExprState &operator=(TypoExprState &&other) noexcept;
  };

  /// The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);

  // The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

  /// Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. 
/// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. 
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
  bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckAttrTarget(const ParsedAttr &CurrAttr);
  bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
  bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                      StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(
      CXXRecordDecl *RD, SourceRange Range, bool BestCase,
      MSInheritanceAttr::Spelling SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                              SourceLocation Loc);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                   const ParsedAttributesView &Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if a method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is the main routine to warn if any
  /// method remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
  /// and its property has a backing ivar, returns this ivar; otherwise, returns
  /// NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                               const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                      SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD,
                      Selector GetterSel, SourceLocation GetterNameLoc,
                      Selector SetterSel, SourceLocation SetterNameLoc,
                      const bool isReadWrite,
                      unsigned &Attributes,
                      const unsigned AttributesAsWritten,
                      QualType T,
                      TypeSourceInfo *TSI,
                      tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjcPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                      ObjCContainerDecl *CDecl,
                      SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD,
                      Selector GetterSel, SourceLocation GetterNameLoc,
                      Selector SetterSel, SourceLocation SetterNameLoc,
                      const bool isReadWrite,
                      const unsigned Attributes,
                      const unsigned AttributesAsWritten,
                      QualType T,
                      TypeSourceInfo *TSI,
                      tok::ObjCKeywordKind MethodImplKind,
                      DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true, or false, accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface
  /// or protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in the global method pool for the
  /// given selector. It checks the desired kind first; if none is found and
  /// the parameter checkTheOther is set, it then checks the other kind. If no
  /// such method or only one method is found, the function returns false;
  /// otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                          SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                          bool InstanceFirst, bool CheckTheOther,
                                          const ObjCObjectType *TypeBound = nullptr);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R, bool receiverIdOrClass,
                                      SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                          Selector Sel, SourceRange R,
                                          bool receiverIdOrClass);

private:
  /// - Returns a selector which best matches the given argument list, or
  /// nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                   QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                         SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// An RAII object to enter the scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); 
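  // Illustrative sketch (assumed usage, not taken from this file) of how the
  // Objective-C exception callbacks around here fit together: for a statement
  // such as
  //
  //   @try { work(); }
  //   @catch (NSException *e) { handle(e); }
  //   @finally { cleanup(); }
  //
  // the parser builds one statement per @catch clause via ActOnObjCAtCatchStmt
  // (above), builds the @finally block via ActOnObjCAtFinallyStmt (below), and
  // then combines the @try body, the @catch statements, and the @finally
  // statement with ActOnObjCAtTryStmt.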
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
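  // Illustrative example (assumed, not part of the original comment): given
  //
  //   constexpr int N = 4;
  //   int arr[N];          // the use of N only reads a constant; not an odr-use
  //   const int *p = &N;   // taking the address is an odr-use of N
  //
  // the DeclRefExpr for N may be built before we can tell which of these
  // situations applies, which is why odr-use marking is delayed for such
  // constant variables.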
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. 
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
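  // For illustration (the type and member names below are placeholders, not
  // part of this interface):
  //
  //   struct Handle {
  //     struct Impl { int Value; } *I;
  //     Impl *operator->() { return I; }
  //   };
  //   int get(Handle H) { return H->Value; } // OK: goes through operator->
  //
  // A mistaken 'H.Value' finds no member 'Value' in Handle; with these extra
  // arguments the member access can be retried as 'H->Value', which succeeds.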
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); // C++AMP diagnotic routine on C++ method call expressions void DiagnoseCXXAMPMethodCallExpr(SourceLocation LParenLoc, CXXMethodDecl *Callee); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
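  /// For illustration, a minimal caller sketch; \c SemaRef, \c CurScope,
  /// \c FnExpr, \c Args and the parenthesis locations are placeholders
  /// supplied by the parser, not defined here:
  /// \code
  ///   ExprResult Call =
  ///       SemaRef.ActOnCallExpr(CurScope, FnExpr, LParenLoc, Args, RParenLoc);
  ///   if (Call.isInvalid())
  ///     return ExprError();
  /// \endcode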
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); // C++AMP restriction specifier calculation routines for special member function void InheritSMFDtorIntersections(CXXRecordDecl* RDecl, bool& CPUAttr, bool& AMPAttr, bool& ParentCPUAttr, bool& ParentAMPAttr); void InheritSMFCtorIntersections(CXXRecordDecl* RDecl, bool& CPUAttr, bool& AMPAttr, bool& ParentCPUAttr, bool& ParentAMPAttr, int flag, bool ConstParam = true); void InheritSMFMethodIntersections(CXXRecordDecl* RDecl, bool& CPUAttr, bool& AMPAttr, bool& ParentCPUAttr, bool& ParentAMPAttr, int flag, bool ConstParam = true); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression. 
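  // For illustration, a GNU statement expression at the source level; the last
  // expression statement inside the braces provides the value of the whole
  // "({..})":
  //
  //   int Doubled = ({ int Tmp = 21; Tmp * 2; }); // value is 'Tmp * 2', i.e. 42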
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
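  /// For illustration, a source-level use; the builtin performs an element-wise
  /// conversion between vector types with the same number of elements:
  /// \code
  ///   typedef float FloatVec __attribute__((ext_vector_type(4)));
  ///   typedef int   IntVec   __attribute__((ext_vector_type(4)));
  ///   IntVec truncate(FloatVec V) { return __builtin_convertvector(V, IntVec); }
  /// \endcode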
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
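  /// For illustration, at the source level:
  /// \code
  ///   #include <initializer_list>
  ///   struct Widget {
  ///     Widget(std::initializer_list<int>); // initializer-list constructor
  ///     Widget(int, int);                   // not an initializer-list constructor
  ///   };
  /// \endcode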
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// correspnding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// default constructor of a class will have, and whether the parameter /// will be const. 
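  /// For illustration, a hedged sketch of how the ImplicitExceptionSpecification
  /// collector above is typically consumed; \c SemaRef, \c Loc and
  /// \c SubobjectCtor are placeholders, not defined here:
  /// \code
  ///   ImplicitExceptionSpecification Spec(SemaRef);
  ///   Spec.CalledDecl(Loc, SubobjectCtor); // fold in each called member/base
  ///   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();
  /// \endcode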
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Defines an AMP CUP-side serialize function. void DefineAmpCpuSerializeFunction(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Defines an AMP GPU-side deserialize function. void DefineAmpGpuDeSerializeFunction(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare trampoline name lookup code for AMP CPU-side void DeclareAMPTrampolineName(CXXRecordDecl *ClassDecl, DeclarationName Name); /// Declare trampoline code for AMP GPU-side entry void DeclareAMPTrampoline(CXXRecordDecl *ClassDecl, DeclarationName Name); /// Define trampoline code for AMP GPU-side entry void DefineAMPTrampoline(SourceLocation CurrentLocation, CXXMethodDecl *OperatorCall); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. 
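  /// For illustration, at the source level: with no user-declared copy or move
  /// operations and no user-declared destructor, the move assignment operator
  /// is implicitly declared, and assigning from an rvalue defines it.
  /// \code
  ///   #include <string>
  ///   #include <utility>
  ///   struct Entry { std::string Text; };
  ///   void reset(Entry &A, Entry &B) { A = std::move(B); }
  /// \endcode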
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
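  /// For illustration, at the source level:
  /// \code
  ///   #include <typeinfo>
  ///   struct Base { virtual ~Base(); };
  ///   const std::type_info &ByType()        { return typeid(Base); } // type operand
  ///   const std::type_info &ByExpr(Base &B) { return typeid(B); }    // polymorphic glvalue:
  ///                                                                  // dynamic type at run time
  /// \endcode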
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. 
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
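  /// For illustration, at the source level: a plain new-expression finds a
  /// class-scope allocation function first, while '::new' restricts the search
  /// to the global scope (the distinction AllocationFunctionScope describes).
  /// \code
  ///   #include <cstddef>
  ///   struct Pooled {
  ///     static void *operator new(std::size_t);
  ///     static void operator delete(void *);
  ///   };
  ///   void demo() {
  ///     Pooled *A = new Pooled;   // finds Pooled::operator new
  ///     delete A;
  ///     Pooled *B = ::new Pooled; // global operator new only
  ///     ::delete B;
  ///   }
  /// \endcode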
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
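  /// For illustration, a hedged caller sketch; \c SemaRef and \c E (a finished
  /// full-expression that may own temporaries with non-trivial destructors)
  /// are placeholders, not defined here:
  /// \code
  ///   Expr *Full = SemaRef.MaybeCreateExprWithCleanups(E);
  ///   // 'Full' is an ExprWithCleanups if cleanups were required, otherwise E.
  /// \endcode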
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
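  /// For illustration, a hedged sketch of the round trip; \c SemaRef, \c SS and
  /// \c AnnotationRange are placeholders supplied by the parser:
  /// \code
  ///   void *Ann = SemaRef.SaveNestedNameSpecifierAnnotation(SS);
  ///   // ... later, when the annotation token is consumed ...
  ///   CXXScopeSpec Restored;
  ///   SemaRef.RestoreNestedNameSpecifierAnnotation(Ann, AnnotationRange, Restored);
  /// \endcode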
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
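  /// For illustration, at the source level (C++14 init-captures):
  /// \code
  ///   #include <utility>
  ///   #include <vector>
  ///   auto make(std::vector<int> V) {
  ///     return [Data = std::move(V), Extra = 1](int I) { return Data[I] + Extra; };
  ///   }
  /// \endcode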
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
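  /// For illustration, at the source level (blocks must be enabled, e.g. with
  /// -fblocks); as described above, the lambda object is copied into the block:
  /// \code
  ///   void demo() {
  ///     auto L = [](int X) { return X + 1; };
  ///     int (^Block)(int) = L; // lambda-to-block-pointer conversion
  ///     Block(1);
  ///   }
  /// \endcode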
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
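///
/// For example, a construct like the following typically marks the vtable of
/// \c Base as used, so it may need to be defined by this routine:
/// \code
/// struct Base { virtual void f(); };
/// Base *make() { return new Base(); }
/// \endcode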
bool DefineUsedVTables(); /// \brief Test if a given class requires a /// C++AMP deserializer declaration bool NeedAMPDeserializer(CXXRecordDecl *ClassDecl); /// \brief Test if a given class has a C++AMP deserializer declaration bool HasDeclaredAMPDeserializer(CXXRecordDecl *ClassDecl); // Declare C++AMP serializer and deserializer typedef SmallVector<QualType, 16> AMPDeserializerArgs; void DeclareAMPSerializer(CXXRecordDecl *ClassDecl, DeclarationName Name); void DeclareAMPDeserializer(CXXRecordDecl *ClassDecl, AMPDeserializerArgs *Args); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void
CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
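///
/// For example:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); };  // error: overrides a function marked 'final'
/// \endcode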
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
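///
/// For example, 'Wrap' is used as a deduction-guide name here:
/// \code
/// template<typename T> struct Wrap { Wrap(T); };
/// Wrap(int) -> Wrap<long>;  // deduction-guide declaration
/// \endcode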
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
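///
/// For example:
/// \code
/// template<typename T, typename U> void f() {
///   class T::template apply<U> *p;  // elaborated-type-specifier naming a template-id
/// }
/// \endcode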
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; // C++AMP diagnostic routine for template arguments void DiagnoseCXXAMPTemplateArgument(NamedDecl *Param, const TemplateArgumentLoc &AL, NamedDecl *Template, SourceLocation TemplateLoc); bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality.
enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
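///
/// For example:
/// \code
/// template<typename MetaFun, typename T1, typename T2>
/// struct apply2 {
///   typedef typename MetaFun::template apply<T1, T2>::type type;
/// };
/// \endcode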
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
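///
/// For example, in the following the nested-name-specifier 'Ts::' contains
/// the parameter pack 'Ts':
/// \code
/// template<typename ...Ts> int sum() { return (Ts::value + ...); }
/// \endcode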
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
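///
/// For example, deduction for the call below fails because the two arguments
/// would deduce inconsistent values for 'T' (int vs. double):
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 2.5); }
/// \endcode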
enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
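///
/// For example, in the call below the argument '42' is a call argument from
/// which 'T' is deduced:
/// \code
/// template<typename T> void f(T);
/// void g() { f(42); }
/// \endcode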
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
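///
/// For example, class template argument deduction for 'b' below relies on an
/// implicitly declared deduction guide:
/// \code
/// template<typename T> struct Box { Box(T v) : value(v) {} T value; };
/// Box b(42);  // deduces Box<int>
/// \endcode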
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts.
/// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callback is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. 
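// Illustrative usage sketch (not part of the original interface): the
// ArgumentPackSubstitutionIndexRAII helper defined above pins substitution to
// a single element of an expanded parameter pack. Assuming a Sema &S, a
// pattern expression Pattern, a MultiLevelTemplateArgumentList TemplateArgs,
// and a known expansion count, the typical pattern looks roughly like:
//
//   for (unsigned I = 0; I != *NumExpansions; ++I) {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
//     ExprResult Element = S.SubstExpr(Pattern, TemplateArgs);
//     if (Element.isInvalid())
//       return true;
//     // ... collect Element; the previous index is restored automatically
//     // when SubstIndex goes out of scope.
//   }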
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
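// Illustrative usage sketch (assumptions noted): inside one of Sema's own
// instantiation routines, the InstantiatingTemplate RAII struct above is
// typically used as follows; PointOfInstantiation and Entity stand for
// whatever the caller is instantiating, not fixed names from this header:
//
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return true;   // instantiation depth exceeded; an error was emitted
//   if (Inst.isAlreadyInstantiating())
//     return false;  // this specialization is already being instantiated
//   // ... perform the instantiation; the context is popped from
//   // CodeSynthesisContexts when Inst is destroyed.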
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
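// Illustrative usage sketch (assumptions noted): ExtParameterInfoBuilder
// (declared above) is filled in increasing parameter order and then attached
// to an ExtProtoInfo. Proto is assumed to be a FunctionProtoType and Params
// its (already substituted) parameters:
//
//   Sema::ExtParameterInfoBuilder InfoBuilder;
//   for (unsigned I = 0, N = Params.size(); I != N; ++I)
//     InfoBuilder.set(I, Proto->getExtParameterInfo(I));
//   FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
//   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(Params.size());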
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const 
ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
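// Illustrative dispatch sketch (assumptions noted): getObjCMessageKind above
// classifies the leading identifier of a bracketed message send so the parser
// can pick the matching entry point. All surrounding variables (S, CurScope,
// Sel, the source locations, Args, ReceiverExpr) are assumed, not prescribed:
//
//   ParsedType ReceiverType;
//   switch (S.getObjCMessageKind(CurScope, Name, NameLoc, IsSuper,
//                                HasTrailingDot, ReceiverType)) {
//   case Sema::ObjCSuperMessage:
//     return S.ActOnSuperMessage(CurScope, SuperLoc, Sel, LBracLoc,
//                                SelectorLocs, RBracLoc, Args);
//   case Sema::ObjCClassMessage:
//     return S.ActOnClassMessage(CurScope, ReceiverType, Sel, LBracLoc,
//                                SelectorLocs, RBracLoc, Args);
//   case Sema::ObjCInstanceMessage:
//     return S.ActOnInstanceMessage(CurScope, ReceiverExpr, Sel, LBracLoc,
//                                   SelectorLocs, RBracLoc, Args);
//   }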
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
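// Illustrative sketch (assumptions noted): the ActOnPragma* entry points above
// are called by the parser's pragma handlers once a directive has been lexed.
// For example, '#pragma options align=natural' reaches Sema roughly as
// (PragmaLoc is the directive's location, supplied by the handler):
//
//   S.ActOnPragmaOptionsAlign(Sema::POAK_Natural, PragmaLoc);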
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
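// Illustrative sketch (assumptions noted): the Add*Attr helpers above are
// shared between regular attribute processing and template instantiation,
// where a dependent argument is substituted first and then re-checked. For a
// dependent 'aligned' attribute, with Aligned, New, and TemplateArgs assumed
// and the usual AlignedAttr accessors cited from memory:
//
//   ExprResult Result = S.SubstExpr(Aligned->getAlignmentExpr(), TemplateArgs);
//   if (!Result.isInvalid())
//     S.AddAlignedAttr(Aligned->getRange(), New, Result.getAs<Expr>(),
//                      Aligned->getSpellingListIndex(),
//                      Aligned->isPackExpansion());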
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. 
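// Illustrative sketch (assumptions noted): the OpenMP capture helpers above
// are used to rewrite references to variables that an enclosing OpenMP region
// captures; VD and Loc are assumed, not prescribed:
//
//   if (VarDecl *Captured = S.isOpenMPCapturedDecl(VD)) {
//     ExprResult Ref =
//         S.getOpenMPCapturedExpr(Captured, VK_LValue, OK_Ordinary, Loc);
//     // ... use Ref in place of a direct reference to VD.
//   }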
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement.
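// Illustrative sketch (assumptions noted): the usual sequence of calls made
// for an executable OpenMP directive, using the entry points declared above;
// DKind, DirName, CancelRegion, Clauses, the locations, and the statement
// parsing step are all supplied by the caller:
//
//   S.StartOpenMPDSABlock(DKind, DirName, CurScope, Loc);
//   // for each clause: S.StartOpenMPClause(CKind); parse it;
//   // S.EndOpenMPClause();
//   S.ActOnOpenMPRegionStart(DKind, CurScope);
//   StmtResult AStmt = ParseStatement();  // parser-side step; name illustrative
//   AStmt = S.ActOnOpenMPRegionEnd(AStmt, Clauses);
//   StmtResult Directive = S.ActOnOpenMPExecutableDirective(
//       DKind, DirName, CancelRegion, Clauses, AStmt.get(), StartLoc, EndLoc);
//   S.EndOpenMPDSABlock(Directive.get());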
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. 
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause.
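// A small sketch of how clause callbacks like the ones above might be used to
// assemble the clause list for a directive; 'Actions', 'NumThreadsExpr' and the
// source locations are assumed placeholders provided by the caller.
//
//   SmallVector<OMPClause *, 4> Clauses;
//   if (OMPClause *C = Actions.ActOnOpenMPNumThreadsClause(
//           NumThreadsExpr, StartLoc, LParenLoc, EndLoc))
//     Clauses.push_back(C);
//   if (OMPClause *C = Actions.ActOnOpenMPNowaitClause(StartLoc, EndLoc))
//     Clauses.push_back(C);
//   // 'Clauses' is then handed to the matching ActOnOpenMP*Directive call.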
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. 
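// A hedged sketch of a 'reduction' clause callback invocation following the
// signature above; 'Actions', 'Vars', 'ReductionIdScopeSpec', 'ReductionId' and
// the locations are assumed placeholders supplied by the caller.
//
//   OMPClause *Red = Actions.ActOnOpenMPReductionClause(
//       Vars, StartLoc, LParenLoc, ColonLoc, EndLoc,
//       ReductionIdScopeSpec, ReductionId);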
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. 
CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. 
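// A brief sketch of the usual conversion pattern built from the helpers above;
// 'E' is an assumed placeholder expression and diagnostics are left out.
//
//   ExprResult Res = UsualUnaryConversions(E);
//   if (Res.isInvalid())
//     return ExprError();
//   E = Res.get(); // integer promotions and function/array decay applied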
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. 
IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; // C++AMP diagnostic routine for expressions void DiagnoseCXXAMPExpr(Expr* Stripped, ExprResult &HS, bool DiagnoseWhenStatic=false); /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
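// A sketch of the check-then-diagnose pattern the assignment hooks above
// suggest; 'LHSType', 'RHS' and 'Loc' are assumed placeholders, and AA_Assigning
// is an AssignmentAction value that is not shown in this excerpt.
//
//   AssignConvertType ConvTy = CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (RHS.isInvalid())
//     return ExprError();
//   DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHS.get()->getType(),
//                            RHS.get(), AA_Assigning);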
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
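// A minimal sketch of how one of the binary-operator checkers above is typically
// consumed; per the note above, a null QualType means a diagnostic was already
// issued. 'LHS', 'RHS' and 'OpLoc' are assumed placeholders.
//
//   QualType ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, BO_Add);
//   if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid())
//     return ExprError();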
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
// Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return The checked and converted condition expression, or an invalid /// result if there were any errors. ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns an invalid result if conversion to /// bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. 
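// A hedged sketch of the simplest integer-constant-expression check using the
// VerifyIntegerConstantExpression overloads declared above; 'E' is an assumed
// placeholder expression.
//
//   llvm::APSInt Value;
//   ExprResult ICE = VerifyIntegerConstantExpression(E, &Value);
//   if (ICE.isInvalid())
//     return ExprError(); // a diagnostic has already been produced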
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transitively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. 
bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. 
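// A hedged sketch of the call-site check described above; 'CallLoc' and
// 'CalleeFD' are assumed placeholders, and getLangOpts() is the usual Sema
// accessor, which is not shown in this excerpt.
//
//   if (getLangOpts().CUDA && !CheckCUDACall(CallLoc, CalleeFD))
//     return ExprError(); // never-allowed caller/callee combination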
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void 
CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. 
If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. 
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
origamitag.c
//----------------------------------------------------------------------------- // Copyright (c) 2016, Bridget L. Falck, Mark C. Neyrinck, & Nuala McCullagh // // Distributed under the terms of the Modified BSD License. // // The full license is in the file LICENSE, distributed with this software. //----------------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include "params.h" #define DL for (d=0;d<3;d++) #define BF 1e30 #define max(A,B) (((A)>(B)) ? (A):(B)) #define goodmod(A,B) (((A) >= (B)) ? (A-B):(((A) < 0) ? (A+B):(A))) int isneg(int h) { return (int)(h < 0); } int par(int i, int j, int k, int ng) { return i + (j + k*ng)*ng; } void read_parameters(char *filename); int posread(char *filename, float ***p, float fact); int readgadget(char *filename, float ***p); int main(int argc, char *argv[]) { int exitcode; int np; float **r; FILE *pos, *tag; char *paramfile,tagoutfile[80]; float xmin,xmax,ymin,ymax,zmin,zmax; int s; float negb2,b2; int ng2,ng4, h, i,i2, x,y,z,nhalo,nhalo0,nhalo1,nhalo2,nhaloany; unsigned char *m,*m0,*m1,*m2, mn,m0n,m1n,m2n; /*Morphology tag */ int zstart, ystart, xstart, zend, yend, xend; float dx,d1,d2; if (argc < 2) { printf("Please specify a parameter file\n"); exit(0); } else paramfile = argv[1]; read_parameters(paramfile); b2 = boxsize/2.; negb2 = -boxsize/2.; sprintf(tagoutfile,"%s%stag.dat",outdir,taglabel); ng2=np1d/2; ng4=np1d/4; if (numfiles > 0) { np = readgadget(posfile,&r); } else np = posread(posfile,&r,1.); printf("%d particles\n",np);fflush(stdout); xmin = BF; xmax = -BF; ymin = BF; ymax = -BF; zmin = BF; zmax = -BF; m = (unsigned char *)malloc(np*sizeof(unsigned char)); m0 = (unsigned char *)malloc(np*sizeof(unsigned char)); /* for the diagonals */ m1 = (unsigned char *)malloc(np*sizeof(unsigned char)); m2 = (unsigned char *)malloc(np*sizeof(unsigned char)); for (i=0; i<np;i++) { if (r[i][0]<xmin) xmin = r[i][0]; if (r[i][0]>xmax) xmax = r[i][0]; if (r[i][1]<ymin) ymin = r[i][1]; if (r[i][1]>ymax) ymax = r[i][1]; if (r[i][2]<zmin) zmin = r[i][2]; if (r[i][2]>zmax) zmax = r[i][2]; m[i] = 1; m0[i] = 1; m1[i] = 1; m2[i] = 1; } if (m==NULL) { printf("Morphology array cannot be allocated.\n"); exit(0); } // printf("np: %d, x: %f,%f; y: %f,%f; z: %f,%f\n",np,xmin,xmax, ymin,ymax, zmin,zmax); fflush(stdout); printf("Calculating ORIGAMI morphology.\n"); #pragma omp parallel for default(none) shared(np1d, ng4, r, boxsize, negb2, b2, m, m0, m1, m2, nsplit) private (x, y, z, h, dx, i, i2, d1, d2, xstart, xend, ystart, yend, zstart, zend) for (s = 0; s < nsplit*nsplit*nsplit; s++) { zstart = s / (nsplit*nsplit); zend = zstart + 1; ystart = (s - zstart * nsplit * nsplit)/nsplit; yend = ystart + 1; xstart = (s - zstart * nsplit * nsplit - ystart * nsplit); xend = xstart + 1; zstart *= (np1d/nsplit); ystart *= (np1d/nsplit); xstart *= (np1d/nsplit); zend *= (np1d/nsplit); yend *= (np1d/nsplit); xend *= (np1d/nsplit); for (x=xstart; x<xend; x++){ // printf("%d\n",x);fflush(stdout); for (y=ystart; y<yend; y++) { for (z=zstart; z<zend; z++) { i = par(x,y,z,np1d); /* First just along the Cartesian axes */ /* x-direction */ for (h=1; abs(h)<ng4; h = -h + isneg(h)) { i2 = par(goodmod(x+h,np1d),y,z,np1d); dx = r[i2][0]-r[i][0]; if (dx < negb2) dx += boxsize; if (dx > b2) dx -= boxsize; if (dx*h < 0.) 
{ if (m[i] % 2 > 0) { m[i] *= 2; } break; } } for (h=1; abs(h)<ng4; h = -h + isneg(h)) { i2 = par(x,goodmod(y+h, np1d),z,np1d); dx = r[i2][1]-r[i][1]; if (dx < negb2) dx += boxsize; if (dx > b2) dx -= boxsize; if (dx*h < 0.) { /*printf("y:%d %d %d %d %f\n",x,y,z,h,dx);*/ if (m[i] % 3 > 0) { m[i] *= 3; } break; } } for (h=1; abs(h)<ng4; h = -h + isneg(h)) { i2 = par(x,y,goodmod(z+h, np1d),np1d); dx = r[i2][2]-r[i][2]; if (dx < negb2) dx += boxsize; if (dx > b2) dx -= boxsize; if (dx*h < 0.) { /*printf("z:%d %d %d %d %f\n",x,y,z,h,dx);*/ if (m[i] % 5 > 0) { m[i] *= 5; } break; } } // Now do diagonal directions for (h=1; h<ng4; h = -h + isneg(h)) { i2 = par(x,goodmod(y+h,np1d),goodmod(z+h,np1d),np1d); d1 = r[i2][1]-r[i][1]; d2 = r[i2][2]-r[i][2]; if (d1 < negb2) d1 += boxsize; if (d1 > b2) d1 -= boxsize; if (d2 < negb2) d2 += boxsize; if (d2 > b2) d2 -= boxsize; if ((d1 + d2)*h < 0.) { m0[i] *= 2; break; } } for (h=1; h<ng4; h = -h + isneg(h)) { i2 = par(x,goodmod(y+h,np1d),goodmod(z-h,np1d),np1d); d1 = r[i2][1]-r[i][1]; d2 = r[i2][2]-r[i][2]; if (d1 < negb2) d1 += boxsize; if (d1 > b2) d1 -= boxsize; if (d2 < negb2) d2 += boxsize; if (d2 > b2) d2 -= boxsize; if ((d1 - d2)*h < 0.) { m0[i] *= 3; break; } } // y for (h=1; h<ng4; h = -h + isneg(h)) { i2 = par(goodmod(x+h,np1d),y,goodmod(z+h,np1d),np1d); d1 = r[i2][0]-r[i][0]; d2 = r[i2][2]-r[i][2]; if (d1 < negb2) d1 += boxsize; if (d1 > b2) d1 -= boxsize; if (d2 < negb2) d2 += boxsize; if (d2 > b2) d2 -= boxsize; if ((d1 + d2)*h < 0.) { m1[i] *= 2; break; } } for (h=1; h<ng4; h = -h + isneg(h)) { i2 = par(goodmod(x+h,np1d),y,goodmod(z-h,np1d),np1d); d1 = r[i2][0]-r[i][0]; d2 = r[i2][2]-r[i][2]; if (d1 < negb2) d1 += boxsize; if (d1 > b2) d1 -= boxsize; if (d2 < negb2) d2 += boxsize; if (d2 > b2) d2 -= boxsize; if ((d1 - d2)*h < 0.) { m1[i] *= 3; break; } } // z for (h=1; h<ng4; h = -h + isneg(h)) { i2 = par(goodmod(x+h,np1d),goodmod(y+h,np1d),z,np1d); d1 = r[i2][0]-r[i][0]; d2 = r[i2][1]-r[i][1]; if (d1 < negb2) d1 += boxsize; if (d1 > b2) d1 -= boxsize; if (d2 < negb2) d2 += boxsize; if (d2 > b2) d2 -= boxsize; if ((d1 + d2)*h < 0.) { m2[i] *=2; break; } } for (h=1; h<ng4; h = -h + isneg(h)) { i2 = par(goodmod(x+h,np1d),goodmod(y-h,np1d),z,np1d); d1 = r[i2][0]-r[i][0]; d2 = r[i2][1]-r[i][1]; if (d1 < negb2) d1 += boxsize; if (d1 > b2) d1 -= boxsize; if (d2 < negb2) d2 += boxsize; if (d2 > b2) d2 -= boxsize; if ((d1 - d2)*h < 0.) { m2[i] *= 3; break; } } } } } } nhalo = 0; nhalo0 = 0; nhalo1 = 0; nhalo2 = 0; nhaloany = 0; for (i=0;i<np;i++){ mn = (m[i]%2 == 0) + (m[i]%3 == 0) + (m[i]%5 == 0); m0n = (unsigned char)(m[i]%2 == 0) + (unsigned char)(m0[i]%2 == 0) + (unsigned char)(m0[i]%3 == 0); m1n = (unsigned char)(m[i]%3 == 0) + (unsigned char)(m1[i]%2 == 0) + (unsigned char)(m1[i]%3 == 0); m2n = (unsigned char)(m[i]%5 == 0) + (unsigned char)(m2[i]%2 == 0) + (unsigned char)(m2[i]%3 == 0); m[i] = max(mn,max(m0n,max(m1n,m2n))); if (mn == 3) nhalo++; if (m0n == 3) nhalo0++; if (m1n == 3) nhalo1++; if (m2n == 3) nhalo2++; if (m[i] == 3) { nhaloany++; } } printf("nhalo=%d,%d,%d,%d,%d\n",nhalo,nhalo0,nhalo1,nhalo2,nhaloany); /* Output m */ printf("Writing morphology file to %s.\n",tagoutfile);fflush(stdout); tag = fopen(tagoutfile,"w"); if (tag == NULL) { printf("Unable to open %s\n",tagoutfile); exit(0); } fwrite(&np,1, sizeof(int),tag); fwrite(m,np,sizeof(unsigned char),tag); return(1); }
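The tag file written above has a simple layout: one int holding the particle count np, followed by np unsigned char morphology tags, where each tag is the largest number of folded directions (0-3) found in any of the four coordinate systems. The following hypothetical reader sketch tallies the tags under that assumption; interpreting 0/1/2/3 as void/wall/filament/halo follows the usual ORIGAMI convention and is not spelled out in the code above.

/* Hypothetical reader for the <outdir><taglabel>tag.dat file written above:
   one int (np), then np unsigned char tags. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
  FILE *tag;
  int np, i;
  unsigned char *m;
  long long hist[4] = {0, 0, 0, 0};

  if (argc < 2) { printf("Please specify a tag file\n"); return 1; }
  tag = fopen(argv[1], "rb");
  if (tag == NULL) { printf("Unable to open %s\n", argv[1]); return 1; }
  if (fread(&np, sizeof(int), 1, tag) != 1) { fclose(tag); return 1; }
  m = (unsigned char *)malloc(np * sizeof(unsigned char));
  if (m == NULL || fread(m, sizeof(unsigned char), np, tag) != (size_t)np) {
    fclose(tag);
    return 1;
  }
  fclose(tag);
  /* Clamp defensively; the writer only produces values 0..3. */
  for (i = 0; i < np; i++) hist[m[i] > 3 ? 3 : m[i]]++;
  printf("void=%lld wall=%lld filament=%lld halo=%lld\n",
         hist[0], hist[1], hist[2], hist[3]);
  free(m);
  return 0;
}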
pr49897-1.c
/* PR middle-end/49897 */
/* { dg-do run } */

extern void abort (void);

int
main ()
{
  int i, j, x = 0, y, sum = 0;
  /* The inner loop adds 0+1+...+9 = 45 to sum on each of the 10 outer
     iterations, so the nested reductions must give sum == 450, and the
     lastprivate clauses must leave x == 9 and y == 9.  */
#pragma omp parallel reduction(+:sum)
  {
#pragma omp for firstprivate(x) lastprivate(x, y)
    for (i = 0; i < 10; i++)
      {
        x = i;
        y = 0;
#pragma omp parallel reduction(+:sum)
        {
#pragma omp for firstprivate(y) lastprivate(y)
          for (j = 0; j < 10; j++)
            {
              y = j;
              sum += y;
            }
        }
      }
  }
  if (x != 9 || y != 9 || sum != 450)
    abort ();
  return 0;
}
Triangle.c
#define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#define _FILE_OFFSET_BITS 64
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <math.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <omp.h>

/* Return the size of a file in bytes, or exit on error. */
unsigned long long TheFileSize(char const *filename)
{
  struct stat statbuf;
  int a = stat(filename, &statbuf);
  if (a == -1)
  {
    exit(-1);
  }
  unsigned long long filesize = statbuf.st_size;
  return filesize;
}

int main(int argc, char const *argv[])
{
  char const *name;
  if (argc < 2)                        /* was "argc < 1", which can never trigger */
  {
    printf("error\n");
    exit(1);
  }
  else if (argc == 2)
    name = argv[1];
  else if (argc == 3)
    name = argv[2];
  else
  {
    printf("error\n");                 /* name would otherwise be uninitialized */
    exit(1);
  }
  unsigned long long outputsize = TheFileSize(name);
  printf("The file is %llu Bytes.\n", outputsize);
  time_t t0 = time(NULL);
  int openfile = open(name, O_RDONLY);
  if (openfile == -1)
  {
    printf("error\n");
    exit(1);
  }
  unsigned int *reflect;
  reflect = mmap(NULL, outputsize, PROT_READ, MAP_SHARED, openfile, 0);
  if (reflect == NULL || reflect == MAP_FAILED)
  {
    printf("error\n");
    close(openfile);
    exit(-2);
  }
  unsigned long long numedge = outputsize/8;   /* two unsigned ints per edge */
  printf("There are %llu edges in the file.\n", numedge);
  long long i,j;
  unsigned int *front;
  unsigned int *behind;
  front = (unsigned int *)malloc(sizeof(unsigned int)*numedge);
  behind = (unsigned int *)malloc(sizeof(unsigned int)*numedge);
  /* Normalize each edge so that front[i] holds the smaller endpoint. */
  #pragma omp parallel for
  for (i = 0; i < numedge; i++)
  {
    if (reflect[2*i] < reflect[2*i+1])
    {
      front[i] = reflect[2*i];
      behind[i] = reflect[2*i+1];
    }
    else
    {
      front[i] = reflect[2*i+1];
      behind[i] = reflect[2*i];
    }
  }
  munmap(reflect, outputsize);
  close(openfile);
  unsigned long long count=0;
  //#pragma omp parallel for
  // for (i = 0; i < n_edge; i++)
  // {for(j=i; j< n_edge; j++)
  // {
  // if(front[i] == front[j]&& behind[i] == behind[j])
  // {front[j] = behind[j]=0;}
  // }
  // }
  /* Each edge is compared only with its immediate successor, so triangles are
     found only when edges sharing the same smaller endpoint are adjacent in
     the input.  The original "if(front[i]=front[i+1])" assigned instead of
     comparing and read one element past the arrays; count also needs a
     reduction and j must be private to avoid data races. */
  #pragma omp parallel for private(j) reduction(+:count)
  for (i = 0; i < numedge - 1; i++)
  {
    if (front[i] == front[i+1])
    {
      //sumbehind= behind[i]+behind[i+1];
      for (j = 0; j < numedge; j++)
      {
        if (front[j] == behind[i] && behind[j] == behind[i+1])
        {
          count++;
          printf("processing");
        }
      }
    }
  }
  if (count % 2 == 0)                  /* was "count&2 == 0", which parses as count & (2==0) */
    printf("The triangles are %llu.\n", count/2);
  else
    printf("The triangles are %llu.\n", 1+count/2);
  printf("Time costs %ld sec.\n", (long)(time(NULL) - t0));
  free(front);
  free(behind);
  return 0;
}
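The program above expects its input to be a flat binary edge list: one pair of unsigned int vertex ids per edge, eight bytes per edge, in native byte order. A minimal sketch for producing such a file (the name edges.bin is arbitrary) containing a single triangle whose edges also satisfy the adjacency assumption noted in the counting loop:

/* Hypothetical input generator for the triangle counter above. */
#include <stdio.h>

int main(void)
{
  /* Three edges forming one triangle: (0,1), (0,2), (1,2).  The two edges
     sharing the smaller endpoint 0 are adjacent, and the closing edge (1,2)
     follows, so the counting loop above finds exactly one match. */
  unsigned int edges[6] = { 0, 1, 0, 2, 1, 2 };
  FILE *f = fopen("edges.bin", "wb");
  if (f == NULL) { printf("error\n"); return 1; }
  fwrite(edges, sizeof(unsigned int), 6, f);
  fclose(f);
  return 0;
}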
pr3.c
// Write an OpenMP program to multiply two matrices A & B and find the resultant matrix C
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define NRA 62                  /* number of rows in matrix A */
#define NCA 15                  /* number of columns in matrix A */
#define NCB 7                   /* number of columns in matrix B */

int main (int argc, char *argv[])
{
  int tid, nthreads, i, j, k, chunk;
  double a[NRA][NCA],           /* matrix A to be multiplied */
         b[NCA][NCB],           /* matrix B to be multiplied */
         c[NRA][NCB];           /* result matrix C */

  chunk = 10;                   /* set loop iteration chunk size */

  /*** Spawn a parallel region explicitly scoping all variables ***/
  #pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,i,j,k)
  {
    tid = omp_get_thread_num();
    if (tid == 0)
    {
      nthreads = omp_get_num_threads();
      printf("Starting matrix multiply example with %d threads\n",nthreads);
      printf("Initializing matrices...\n");
    }
    /*** Initialize matrices ***/
    #pragma omp for schedule (static, chunk)
    for (i=0; i<NRA; i++)
      for (j=0; j<NCA; j++)
        a[i][j]= i+j;
    #pragma omp for schedule (static, chunk)
    for (i=0; i<NCA; i++)
      for (j=0; j<NCB; j++)
        b[i][j]= i*j;
    #pragma omp for schedule (static, chunk)
    for (i=0; i<NRA; i++)
      for (j=0; j<NCB; j++)
        c[i][j]= 0;

    /*** Do matrix multiply sharing iterations on outer loop ***/
    /*** Display who does which iterations for demonstration purposes ***/
    printf("Thread %d starting matrix multiply...\n",tid);
    #pragma omp for schedule (static, chunk)
    for (i=0; i<NRA; i++)
    {
      printf("Thread=%d did row=%d\n",tid,i);
      for(j=0; j<NCB; j++)
        for (k=0; k<NCA; k++)
          c[i][j] += a[i][k] * b[k][j];
    }
  } // End of parallel region

  /*** Print results ***/
  printf("******************************************************\n");
  printf("Result Matrix:\n");
  for (i=0; i<NRA; i++)
  {
    for (j=0; j<NCB; j++)
      printf("%6.2f ", c[i][j]);
    printf("\n");
  }
  printf("******************************************************\n");
  printf ("Done.\n");
}
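Because the matrices are initialized as a[i][k] = i+k and b[k][j] = k*j, every result entry has the closed form c[i][j] = j*(105*i + 1015) for NCA = 15, using sum(k) = 105 and sum(k^2) = 1015 over k = 0..14. A small sketch that prints this expected matrix for comparison against the program's output:

/* Hypothetical checker for the matrix-multiply example above. */
#include <stdio.h>

#define NRA 62
#define NCA 15
#define NCB 7

int main(void)
{
  int i, j, k;
  for (i = 0; i < NRA; i++) {
    for (j = 0; j < NCB; j++) {
      double expected = 0.0;
      for (k = 0; k < NCA; k++)
        expected += (double)(i + k) * (double)(k * j);
      /* Closed form: j*(105*i + 1015); flag any mismatch with '!'. */
      double closed = (double)j * (i * 105.0 + 1015.0);
      printf("%6.2f%s", closed, expected == closed ? " " : "! ");
    }
    printf("\n");
  }
  return 0;
}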
cfl_stats.c
#ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include "od_defs.h" #include "od_filter.h" #include "od_intra.h" #include "image_tools.h" #include "stats_tools.h" #include "../src/dct.h" #include "../src/intra.h" #define PRINT_PROGRESS (0) #define DEBUG_CFL_PRED (0) #define OUTPUT_PLANES (0) #if B_SZ_LOG!=3 # error "Only B_SZ=8 currently supported." #endif typedef struct cfl_stats_ctx cfl_stats_ctx; struct cfl_stats_ctx { int n; int curr_pli; image_data img[3]; intra_stats gb_fdp[2]; intra_stats gb_cfl[2]; intra_stats st_fdp[2]; intra_stats st_cfl[2]; }; static void cfl_stats_ctx_init(cfl_stats_ctx *_this){ int i; _this->n=0; for (i=0;i<2;i++) { intra_stats_init(&_this->gb_fdp[i],B_SZ_LOG-1); intra_stats_init(&_this->gb_cfl[i],B_SZ_LOG-1); intra_stats_init(&_this->st_fdp[i],B_SZ_LOG-1); intra_stats_init(&_this->st_cfl[i],B_SZ_LOG-1); } } static void cfl_stats_ctx_clear(cfl_stats_ctx *_this){ int i; for (i=0;i<2;i++) { intra_stats_clear(&_this->gb_fdp[i]); intra_stats_clear(&_this->gb_cfl[i]); intra_stats_clear(&_this->st_fdp[i]); intra_stats_clear(&_this->st_cfl[i]); } } static void cfl_stats_ctx_combine(cfl_stats_ctx *_a,cfl_stats_ctx *_b) { int i; if (_b->n==0) { return; } for (i=0;i<2;i++) { intra_stats_combine(&_a->gb_fdp[i],&_b->gb_fdp[i]); intra_stats_combine(&_a->gb_cfl[i],&_b->gb_cfl[i]); } _a->n+=_b->n; } static void od_pre_block(void *_ctx,const unsigned char *_data,int _stride, int _bi,int _bj){ cfl_stats_ctx *ctx; #if PRINT_PROGRESS if(_bi==0&&_bj==0){ fprintf(stdout,"in od_pre_block\n"); } #endif ctx=(cfl_stats_ctx *)_ctx; image_data_pre_block(&ctx->img[ctx->curr_pli],_data,_stride,_bi,_bj); } static void od_fdct_block(void *_ctx,const unsigned char *_data,int _stride, int _bi,int _bj){ cfl_stats_ctx *ctx; (void)_data; (void)_stride; #if PRINT_PROGRESS if(_bi==0&&_bj==0){ fprintf(stdout,"in od_fdct_block\n"); } #endif ctx=(cfl_stats_ctx *)_ctx; image_data_fdct_block(&ctx->img[ctx->curr_pli],_bi,_bj); } #if TF_BLOCKS static void od_tf_block(void *_ctx,const unsigned char *_data,int _stride, int _bi,int _bj){ cfl_stats_ctx *ctx; (void)_data; (void)_stride; #if PRINT_PROGRESS if(_bi==0&&_bj==0){ fprintf(stdout,"in od_tf_block\n"); } #endif ctx=(cfl_stats_ctx *)_ctx; image_data_tf_block(&ctx->img[ctx->curr_pli],_bi,_bj); } #endif static void od_mode_block(void *_ctx,const unsigned char *_data,int _stride, int _bi,int _bj){ cfl_stats_ctx *ctx; (void)_data; (void)_stride; ctx=(cfl_stats_ctx *)_ctx; if (ctx->curr_pli==0) { od_coeff block[5*B_SZ_MAX*B_SZ_MAX]; #if PRINT_PROGRESS if(_bi==0&&_bj==0){ fprintf(stdout,"in od_mode_block\n"); } #endif image_data_load_block(&ctx->img[0],_bi,_bj,block); ctx->img[0].mode[ctx->img[0].nxblocks*_bj+_bi]= od_select_mode_satd(block,NULL,ctx->img[0].b_sz_log); } } static void od_fdp_pred_block(void *_ctx,const unsigned char *_data, int _stride,int _bi,int _bj){ cfl_stats_ctx *ctx; (void)_data; (void)_stride; ctx=(cfl_stats_ctx *)_ctx; if (ctx->curr_pli>0) { #if PRINT_PROGRESS if(_bi==0&&_bj==0){ fprintf(stdout,"in od_fdp_pred_block\n"); } #endif ctx->img[ctx->curr_pli].mode[ctx->img[ctx->curr_pli].nxblocks*_bj+_bi]= ctx->img[0].mode[ctx->img[0].nxblocks*_bj+_bi]; image_data_pred_block(&ctx->img[ctx->curr_pli],_bi,_bj); } } static void od_fdp_stats_block(void *_ctx,const unsigned char *_data, int _stride,int _bi,int _bj){ cfl_stats_ctx *ctx; (void)_data; (void)_stride; ctx=(cfl_stats_ctx *)_ctx; if (ctx->curr_pli>0) { #if PRINT_PROGRESS if(_bi==0&&_bj==0){ fprintf(stdout,"in od_fdp_stats_block\n"); } #endif 
image_data_stats_block(&ctx->img[ctx->curr_pli],_data,_stride,_bi,_bj, &ctx->st_fdp[ctx->curr_pli-1]); } } static void od_cfl_pred_block(void *_ctx,const unsigned char *_data, int _stride,int _bi,int _bj){ cfl_stats_ctx *ctx; (void)_data; (void)_stride; ctx=(cfl_stats_ctx *)_ctx; if (ctx->curr_pli>0) { image_data *img; image_data *img0; int b_sz; int mode; od_coeff p[B_SZ_MAX*B_SZ_MAX]; od_coeff c[2*B_SZ_MAX*2*B_SZ_MAX]; od_coeff l[2*B_SZ_MAX*2*B_SZ_MAX]; unsigned char bsize[8*8]; int bx; int by; int i; int j; double *pred; #if PRINT_PROGRESS if(_bi==0&&_bj==0){ fprintf(stdout,"in od_cfl_pred_block\n"); } #endif img=&ctx->img[ctx->curr_pli]; img0=&ctx->img[0]; b_sz=1<<img->b_sz_log; for (i=0;i<8*8;i++) { bsize[i]=B_SZ_LOG-OD_LOG_BSIZE0; } mode=img->mode[img->nxblocks*_bj+_bi]= ctx->img[0].mode[ctx->img[0].nxblocks*_bj+_bi]; for (by=0;by<2;by++) { for (bx=0;bx<2;bx++) { od_coeff *fdct; fdct=&img->fdct[img->fdct_stride*b_sz*(_bj+by)+b_sz*(_bi+bx)]; for (j=0;j<b_sz;j++) { for (i=0;i<b_sz;i++) { c[(by*b_sz+j)*2*b_sz+bx*b_sz+i]=fdct[img->fdct_stride*j+i]; } } fdct=&img0->fdct[img0->fdct_stride*2*b_sz*(_bj+by)+2*b_sz*(_bi+bx)]; od_resample_luma_coeffs(&l[by*b_sz*2*b_sz+bx*b_sz],2*b_sz,fdct,img0->fdct_stride,1,1,0,1); } } od_chroma_pred(p,c,l,2*b_sz,1,1, img->b_sz_log-OD_LOG_BSIZE0,1,1,bsize,8,OD_INTRA_CHROMA_WEIGHTS_Q8[mode]); pred=&img->pred[img->pred_stride*b_sz*_bj+b_sz*_bi]; for (j=0;j<b_sz;j++) { for (i=0;i<b_sz;i++) { pred[img->pred_stride*j+i]=p[b_sz*j+i]; } } #if DEBUG_CFL_PRED if (ctx->curr_pli>0) { if (_bi==0&&_bj==0) { od_coeff *block; fprintf(stderr,"%i %i\n",_bi,_bj); fprintf(stderr,"luma (time)\n"); block=&img0->pre[img0->pre_stride*b_sz*(2*_bj+1)+b_sz*(2*_bi+1)]; for (j=0;j<4*b_sz;j++) { if (j==2*b_sz) { for (i=0;i<4*b_sz;i++) { fprintf(stderr,"%s-----",i==2*b_sz?"+":""); } fprintf(stderr,"\n"); } for (i=0;i<4*b_sz;i++) { fprintf(stderr,"%s%4i ",i==2*b_sz?"|":"",block[j*img0->pre_stride+i]); } fprintf(stderr,"\n"); } fprintf(stderr,"luma (freq)\n"); block=&img0->fdct[img0->fdct_stride*2*b_sz*_bj+2*b_sz*_bi]; for (j=0;j<4*b_sz;j++) { if (j==2*b_sz) { for (i=0;i<4*b_sz;i++) { fprintf(stderr,"%s-----",i==2*b_sz?"+":""); } fprintf(stderr,"\n"); } for (i=0;i<4*b_sz;i++) { fprintf(stderr,"%s%4i ",i==2*b_sz?"|":"",block[j*img0->fdct_stride+i]); } fprintf(stderr,"\n"); } fprintf(stderr,"luma (resampled)\n"); for (j=0;j<2*b_sz;j++) { if (j==b_sz) { for (i=0;i<2*b_sz;i++) { fprintf(stderr,"%s-----",i==b_sz?"+":""); } fprintf(stderr,"\n"); } for (i=0;i<2*b_sz;i++) { fprintf(stderr,"%s%4i ",i==b_sz?"|":"",l[j*2*b_sz+i]); } fprintf(stderr,"\n"); } fprintf(stderr,"chroma %i\n",ctx->curr_pli); for (j=0;j<2*b_sz;j++) { if (j==b_sz) { for (i=0;i<2*b_sz;i++) { fprintf(stderr,"%s-----",i==b_sz?"+":""); } fprintf(stderr,"\n"); } for (i=0;i<2*b_sz;i++) { fprintf(stderr,"%s%4i ",i==b_sz?"|":"",c[j*2*b_sz+i]); } fprintf(stderr,"\n"); } fprintf(stderr,"pred\n"); for (j=0;j<b_sz;j++) { for (i=0;i<b_sz;i++) { fprintf(stderr,"%4i ",p[b_sz*j+i]); } fprintf(stderr,"\n"); } } } #endif } } static void od_cfl_stats_block(void *_ctx,const unsigned char *_data, int _stride,int _bi,int _bj){ cfl_stats_ctx *ctx; (void)_data; (void)_stride; ctx=(cfl_stats_ctx *)_ctx; if (ctx->curr_pli>0) { #if PRINT_PROGRESS if(_bi==0&&_bj==0){ fprintf(stdout,"in od_cfl_stats_block\n"); } #endif image_data_stats_block(&ctx->img[ctx->curr_pli],_data,_stride,_bi,_bj, &ctx->st_cfl[ctx->curr_pli-1]); } } static int stats_start(void *_ctx,const char *_name, const video_input_info *_info,int _pli,int _nxblocks,int _nyblocks){ cfl_stats_ctx 
*ctx; ctx=(cfl_stats_ctx *)_ctx; ctx->curr_pli=_pli; image_data_init(&ctx->img[_pli],_name,B_SZ_LOG-(_pli!=0),_nxblocks,_nyblocks); if (_pli==0) { fprintf(stdout,"%s\n",_name); ctx->n++; } if (_pli>0) { intra_stats_reset(&ctx->st_fdp[_pli-1]); intra_stats_reset(&ctx->st_cfl[_pli-1]); } return EXIT_SUCCESS; } static int stats_finish(void *_ctx){ int i; cfl_stats_ctx *ctx; ctx=(cfl_stats_ctx *)_ctx; if (ctx->curr_pli==2) { double *od_scale; od_scale=OD_SCALE[B_SZ_LOG-1-OD_LOG_BSIZE0]; for (i=0;i<2;i++) { char label[128]; sprintf(label,"Frequency-Domain Predictors (plane %i)",i+1); intra_stats_combine(&ctx->gb_fdp[i],&ctx->st_fdp[i]); intra_stats_correct(&ctx->st_fdp[i]); intra_stats_print(&ctx->st_fdp[i],label,od_scale); sprintf(label,"Chroma-from-Luma Predictors (plane %i)",i+1); intra_stats_combine(&ctx->gb_cfl[i],&ctx->st_cfl[i]); intra_stats_correct(&ctx->st_cfl[i]); intra_stats_print(&ctx->st_cfl[i],label,od_scale); } for (i=0;i<3;i++) { image_data_clear(&ctx->img[i]); } } return EXIT_SUCCESS; } const block_func BLOCKS[]={ od_pre_block, od_fdct_block, #if TF_BLOCKS od_tf_block, #endif od_mode_block, od_fdp_pred_block, od_fdp_stats_block, od_cfl_pred_block, od_cfl_stats_block, }; const int NBLOCKS=sizeof(BLOCKS)/sizeof(*BLOCKS); /* There is a bug in get_intra_dims where chroma blocks do not line up under luma blocks. */ static void cfl_get_intra_dims(const video_input_info *_info,int _pli, int _padding,int *_x0,int *_y0,int *_nxblocks,int *_nyblocks){ int xshift; int yshift; int padding; xshift=_pli!=0&&!(_info->pixel_fmt&1); yshift=_pli!=0&&!(_info->pixel_fmt&2); OD_ASSERT(xshift==yshift); padding=_padding<<B_SZ_LOG; /*An offset of 1 would be fine to provide enough context for VP8-style intra prediction, but for frequency-domain prediction, we'll want a full block, plus overlap.*/ *_x0=(_info->pic_x>>xshift)+(padding>>1+xshift); *_y0=(_info->pic_y>>yshift)+(padding>>1+yshift); /*We take an extra block off the end to give enough context for above-right intra prediction.*/ *_nxblocks=_info->pic_w-padding>>B_SZ_LOG; *_nyblocks=_info->pic_h-padding>>B_SZ_LOG; } static int cfl_apply_to_blocks(void *_ctx,int _ctx_sz,int _plmask,int _padding, plane_start_func _start,int _nfuncs,const block_func *_funcs, plane_finish_func _finish,int _argc,const char *_argv[]){ int ai; #pragma omp parallel for schedule(dynamic) for(ai=1;ai<_argc;ai++){ FILE *fin; video_input vid; video_input_info info; video_input_ycbcr ycbcr; int pli; int tid; unsigned char *ctx; fin=fopen(_argv[ai],"rb"); if(fin==NULL){ fprintf(stderr,"Could not open '%s' for reading.\n",_argv[ai]); continue; } if(video_input_open(&vid,fin)<0){ fprintf(stderr,"Error reading video info from '%s'.\n",_argv[ai]); continue; } video_input_get_info(&vid,&info); if(video_input_fetch_frame(&vid,ycbcr,NULL)<0){ fprintf(stderr,"Error reading first frame from '%s'.\n",_argv[ai]); continue; } tid=OD_OMP_GET_THREAD; ctx=((unsigned char *)_ctx)+tid*_ctx_sz; #if OUTPUT_PLANES { char *names[]={"y","cb","cr"}; for (pli=0;pli<3;pli++) { int w; int h; int u; int v; int x0; int y0; const unsigned char *data; w=info.pic_w>>(pli!=0); h=info.pic_h>>(pli!=0); x0=info.pic_x>>(pli!=0); y0=info.pic_y>>(pli!=0); data=ycbcr[pli].data; fprintf(stderr,"%s=[",names[pli]); for (v=0;v<h;v++) { for (u=0;u<w;u++) { if (u>0) { fprintf(stderr,","); } fprintf(stderr,"%i",data[(v+y0)*w+u+x0]); } if (v<h) { fprintf(stderr,";"); } } fprintf(stderr,"];\n"); } } #endif for(pli=0;pli<3;pli++){ if(_plmask&1<<pli){ int x0; int y0; int nxblocks; int nyblocks; 
cfl_get_intra_dims(&info,pli,_padding,&x0,&y0,&nxblocks,&nyblocks); if(_start!=NULL){ (*_start)(ctx,_argv[ai],&info,pli,nxblocks,nyblocks); } if(_funcs!=NULL){ int f; for(f=0;f<_nfuncs;f++){ if(_funcs[f]!=NULL){ const unsigned char *data; int stride; int bj; int bi; data=ycbcr[pli].data; stride=ycbcr[pli].stride; for(bj=0;bj<nyblocks;bj++){ int y; y=y0+(bj<<B_SZ_LOG-(pli!=0)); for(bi=0;bi<nxblocks;bi++){ int x; x=x0+(bi<<B_SZ_LOG-(pli!=0)); (*_funcs[f])(ctx,&data[stride*y+x],stride,bi,bj); } } } } } if(_finish!=NULL){ (*_finish)(ctx); } } } video_input_close(&vid); } return EXIT_SUCCESS; } #define PADDING (4) #if PADDING<3 # error "PADDING must be at least 3 luma blocks" #endif int main(int _argc,const char *_argv[]) { cfl_stats_ctx ctx[NUM_PROCS]; int i; ne_filter_params_init(); for (i=0;i<OD_NBSIZES;i++) { od_scale_init(OD_SCALE[i],OD_LOG_BSIZE0+i); } for (i=0;i<NUM_PROCS;i++) { cfl_stats_ctx_init(&ctx[i]); } od_intra_init(); OD_OMP_SET_THREADS(NUM_PROCS); cfl_apply_to_blocks(ctx,sizeof(*ctx),0x7,PADDING,stats_start,NBLOCKS,BLOCKS, stats_finish,_argc,_argv); for (i=1;i<NUM_PROCS;i++) { cfl_stats_ctx_combine(&ctx[0],&ctx[i]); } printf("Processed %i image(s)\n",ctx[0].n); if (ctx[0].n>0) { double *od_scale; od_scale=OD_SCALE[B_SZ_LOG-1-OD_LOG_BSIZE0]; for (i=0;i<2;i++) { char label[128]; sprintf(label,"Frequency-Domain Predictors (plane %i)",i+1); intra_stats_correct(&ctx[0].gb_fdp[i]); intra_stats_print(&ctx[0].gb_fdp[i],label,od_scale); sprintf(label,"Chroma-from-Luma Predictors (plane %i)",i+1); intra_stats_correct(&ctx[0].gb_cfl[i]); intra_stats_print(&ctx[0].gb_cfl[i],label,od_scale); } } for (i=0;i<NUM_PROCS;i++) { cfl_stats_ctx_clear(&ctx[i]); } od_intra_clear(); return EXIT_SUCCESS; }
graph.c
/* * graph.h -- Graph implementation * * Copyright (C) 2010-2014, Computing Systems Laboratory (CSLab) * Copyright (C) 2010-2014, Vasileios Karakasis */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "alloc.h" #include "graph.h" graph_t *graph_create(size_t nr_vertices, size_t nr_edges) { graph_t *ret = (graph_t *) malloc(sizeof(*ret)); if (ret) { ret->nr_vertices = nr_vertices; ret->nr_edges = (nr_edges) ? nr_edges : nr_vertices*nr_vertices; ret->weights = (weight_t **) calloc_2d(nr_vertices, nr_vertices, sizeof(weight_t)); ret->i_off = ret->j_off = 0; if (!ret->weights) { free(ret); ret = NULL; } } return ret; } void graph_init_rand(graph_t *g) { srand48(0); for (size_t i = 0; i < g->nr_vertices; ++i) for (size_t j = 0; j < g->nr_vertices; ++j) g->weights[i][j] = WEIGHT_MAX*drand48(); /* Zero-out the diagonal */ for (size_t i = 0; i < g->nr_vertices; ++i) g->weights[i][i] = 0; /* Erase non-existent edges */ size_t nr_non_edges = g->nr_vertices*g->nr_vertices - g->nr_edges; size_t non_edge_cnt = 0; while (non_edge_cnt != nr_non_edges) { size_t i_del = (size_t) (drand48()*g->nr_vertices); size_t j_del = (size_t) (drand48()*g->nr_vertices); if (g->weights[i_del][j_del] != WEIGHT_MAX) { g->weights[i_del][j_del] = WEIGHT_MAX; ++non_edge_cnt; } } } void graph_delete(graph_t *g) { if (g) { if (g->weights) free_2d((void **) g->weights); } free(g); } void graph_print(const graph_t *g) { printf("Number of vertices: %ld\n", g->nr_vertices); printf("Number of edges: %ld\n", g->nr_edges); printf("Non-edge weight: %ld\n", WEIGHT_MAX); printf(">>> Adjacency matrix <<<\n"); for (size_t i = 0; i < g->nr_vertices; ++i) { for (size_t j = 0; j < g->nr_vertices; ++j) printf("%3.2lf ", g->weights[i][j]); printf("\n"); } } graph_t *graph_copy(const graph_t *g) { graph_t *ret = graph_create(g->nr_vertices, g->nr_edges); ret->i_off = g->i_off; ret->j_off = g->j_off; copy_2d((void **) ret->weights, (const void **) g->weights, ret->nr_vertices, ret->nr_vertices, sizeof(**ret->weights)); return ret; } int graph_equals(const graph_t *g1, const graph_t *g2) { if (g1->nr_vertices != g2->nr_vertices || g1->nr_edges != g2->nr_edges) return 0; for (size_t i = 0; i < g1->nr_vertices; ++i) for (size_t j = 0; j < g2->nr_vertices; ++j) if (ABS(g1->weights[i+g1->i_off][j+g1->j_off] - g2->weights[i+g2->i_off][j+g2->j_off]) > EPS) { printf("%f != %f\n", g1->weights[i+g1->i_off][j+g1->j_off], g2->weights[i+g2->i_off][j+g2->j_off]); return 0; } return 1; } /* * All-pairs shortest path using the iterative Floyd-Warshall algorithm. 
*/ graph_t *graph_apsp_base(graph_t *a, graph_t *b, graph_t *c) { weight_t **da = a->weights; weight_t **db = b->weights; weight_t **dc = c->weights; size_t ai_off = a->i_off; size_t aj_off = a->j_off; size_t bi_off = b->i_off; size_t bj_off = b->j_off; size_t ci_off = c->i_off; size_t cj_off = c->j_off; size_t n = a->nr_vertices; for (size_t k = 0; k < n; ++k) for (size_t i = 0; i < n; ++i) for (size_t j = 0; j < n; ++j) da[i+ai_off][j+aj_off] = MIN(da[i+ai_off][j+aj_off], db[i+bi_off][k+bj_off] + dc[k+ci_off][j+cj_off]); return a; } graph_t *graph_apsp_base_omp(graph_t *a, graph_t *b, graph_t *c) { weight_t **da = a->weights; weight_t **db = b->weights; weight_t **dc = c->weights; size_t ai_off = a->i_off; size_t aj_off = a->j_off; size_t bi_off = b->i_off; size_t bj_off = b->j_off; size_t ci_off = c->i_off; size_t cj_off = c->j_off; for (size_t k = 0; k < a->nr_vertices; ++k) #pragma omp parallel for for (size_t i = 0; i < a->nr_vertices; ++i) #pragma omp parallel for for (size_t j = 0; j < a->nr_vertices; ++j) da[i+ai_off][j+aj_off] = MIN(da[i+ai_off][j+aj_off], db[i+bi_off][k+bj_off] + dc[k+ci_off][j+cj_off]); return a; } graph_t **graph_extract_tiles(const graph_t *g, size_t bsize) { size_t nr_tiles_r = g->nr_vertices / bsize; graph_t **ret = (graph_t **) calloc_2d(nr_tiles_r, nr_tiles_r, sizeof(**ret)); for (size_t i = 0; i < nr_tiles_r; ++i) for (size_t j = 0; j < nr_tiles_r; ++j) { ret[i][j].weights = g->weights; ret[i][j].nr_vertices = bsize; ret[i][j].nr_edges = bsize*bsize; ret[i][j].i_off = i*bsize; ret[i][j].j_off = j*bsize; } return ret; } graph_t *graph_apsp_tile(graph_t *g) { graph_t **tiles = graph_extract_tiles(g, CPU_TILE_DIM); size_t nr_blocks = g->nr_vertices / CPU_TILE_DIM; #pragma omp parallel { for (size_t bk = 0; bk < nr_blocks; ++bk) { graph_apsp_base(&tiles[bk][bk], &tiles[bk][bk], &tiles[bk][bk]); /* Compute the same block row and column */ #pragma omp for nowait for (size_t bi = 0; bi < nr_blocks; ++bi) if (bi != bk) { graph_apsp_base(&tiles[bi][bk], &tiles[bi][bk], &tiles[bk][bk]); graph_apsp_base(&tiles[bk][bi], &tiles[bk][bk], &tiles[bk][bi]); } #pragma omp barrier /* Compute the rest of the matrix */ for (size_t bi = 0; bi < nr_blocks; ++bi) { if (bi == bk) continue; #pragma omp for nowait for (size_t bj = 0; bj < nr_blocks; ++bj) if (bj != bk) graph_apsp_base(&tiles[bi][bj], &tiles[bi][bk], &tiles[bk][bj]); } #pragma omp barrier } } free_2d((void **) tiles); return g; }
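A minimal driver sketch for the routines above, assuming graph.h declares them and that the vertex count is a multiple of CPU_TILE_DIM (the tiled version splits the adjacency matrix into nr_vertices/CPU_TILE_DIM blocks). It runs the plain Floyd-Warshall and the tiled OpenMP variant on copies of the same random graph and checks that they agree:

/* Hypothetical driver for the APSP implementations above. */
#include <stdio.h>
#include <stdlib.h>
#include "graph.h"

int main(int argc, char **argv)
{
  /* Vertex count; assumed to be a multiple of CPU_TILE_DIM. */
  size_t n = (argc > 1) ? (size_t) atol(argv[1]) : 512;

  graph_t *g = graph_create(n, 0);          /* 0 => fully connected graph */
  if (!g) {
    fprintf(stderr, "could not allocate graph\n");
    return EXIT_FAILURE;
  }
  graph_init_rand(g);

  graph_t *g_tiled = graph_copy(g);

  /* Reference: in-place Floyd-Warshall, passing the same graph as a, b, c,
     exactly as the tiled code does for its diagonal blocks. */
  graph_apsp_base(g, g, g);

  /* Tiled, OpenMP-parallel version. */
  graph_apsp_tile(g_tiled);

  int ok = graph_equals(g, g_tiled);
  printf("tiled result %s the baseline\n", ok ? "matches" : "does NOT match");

  graph_delete(g);
  graph_delete(g_tiled);
  return ok ? EXIT_SUCCESS : EXIT_FAILURE;
}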
bl_sgemm_ref.c
#if defined(_WIN32) #else /* * -------------------------------------------------------------------------- * BLISLAB * -------------------------------------------------------------------------- * Copyright (C) 2016, The University of Texas at Austin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name of The University of Texas nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * bl_sgemm_ref.c * * * Purpose: * implement reference mkl using GEMM (optional) in C. * * Todo: * * * Modification: * * * */ #include <omp.h> #include <stdlib.h> #include <limits.h> #include <float.h> #include "bl_sgemm.h" #include "bl_sgemm_ref.h" #ifdef USE_BLAS /* * sgemm prototype * */ //void sgemm(char*, char*, int*, int*, int*, float*, float*, // int*, float*, int*, float*, float*, int*); extern void sgemm_(char*, char*, int*, int*, int*, float*, float*, int*, float*, int*, float*, float*, int*); #endif void bl_sgemm_ref( int m, int n, int k, float *XA, int lda, float *XB, int ldb, float *XC, int ldc ) { // Local variables. int i, j, p; float beg, time_collect, time_sgemm, time_square; float alpha = 1.0, beta = 1.0; // Sanity check for early return. if ( m == 0 || n == 0 || k == 0 ) return; // Reference GEMM implementation. beg = omp_get_wtime(); #ifdef USE_BLAS sgemm_( "N", "N", &m, &n, &k, &alpha, XA, &lda, XB, &ldb, &beta, XC, &ldc ); #else #pragma omp parallel for private( i, p ) for ( j = 0; j < n; j ++ ) { for ( i = 0; i < m; i ++ ) { for ( p = 0; p < k; p ++ ) { XC[ j * ldc + i ] += XA[ p * lda + i ] * XB[ j * ldb + p ]; } } } #endif time_sgemm = omp_get_wtime() - beg; } #endif
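bl_sgemm_ref() operates on column-major matrices and accumulates into XC (alpha = beta = 1), so the caller must initialize XC itself. A minimal driver sketch, assuming the prototype is available from bl_sgemm_ref.h:

/* Hypothetical driver for the reference GEMM above. */
#include <stdio.h>
#include <stdlib.h>
#include "bl_sgemm_ref.h"

int main(void)
{
  int m = 4, n = 3, k = 5;
  int lda = m, ldb = k, ldc = m;
  float *A = malloc(sizeof(float) * lda * k);
  float *B = malloc(sizeof(float) * ldb * n);
  float *C = calloc(ldc * n, sizeof(float));   /* C starts at zero */
  int i, j, p;

  /* Column-major fill: A(i,p) = i + p, B(p,j) = p * j. */
  for (p = 0; p < k; p++)
    for (i = 0; i < m; i++)
      A[p * lda + i] = (float)(i + p);
  for (j = 0; j < n; j++)
    for (p = 0; p < k; p++)
      B[j * ldb + p] = (float)(p * j);

  bl_sgemm_ref(m, n, k, A, lda, B, ldb, C, ldc);

  /* Spot-check C(0,1) = sum_p (0+p)*(p*1) = 0+1+4+9+16 = 30. */
  printf("C(0,1) = %.1f (expected 30.0)\n", C[1 * ldc + 0]);

  free(A); free(B); free(C);
  return 0;
}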
openmp_then_init.c
// RUN: %libomp-compile-and-run // REQUIRES: abt #include "omp_testsuite.h" #include <stdio.h> int test_openmp_then_init(int num_init) { int i; int val = 0; #pragma omp parallel num_threads(NUM_TASKS) { #pragma omp master { int initialized = (ABT_initialized() == ABT_SUCCESS); for (i = 0; i < num_init; i++) { ABT_EXIT_IF_FAIL(ABT_init(0, 0)); } val = initialized ? 1 : 0; for (i = 0; i < num_init; i++) { ABT_EXIT_IF_FAIL(ABT_finalize()); } } } return val; } int main() { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { // Note that Argobots will be initialized once BOLT is instantiated. if (!test_openmp_then_init(i + 1)) { num_failed++; } } return num_failed; }
GB_bitmap_AxB_saxpy_A_bitmap_B_bitmap_template.c
//------------------------------------------------------------------------------ // GB_bitmap_AxB_saxpy_A_bitmap_B_bitmap: C<#M>+=A*B, C bitmap, M any format //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C is bitmap, A and B are bitmap/full. M has any format. { int64_t tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (tid = 0 ; tid < ntasks ; tid++) { //---------------------------------------------------------------------- // get the task to compute C (I,J) //---------------------------------------------------------------------- int64_t I_tid = tid / nI_tasks ; int64_t J_tid = tid % nI_tasks ; // I = istart:iend-1 int64_t istart = I_tid * GB_TILE_SIZE ; int64_t iend = GB_IMIN (avlen, istart + GB_TILE_SIZE) ; // J = jstart:jend-1 int64_t jstart = J_tid * GB_TILE_SIZE ; int64_t jend = GB_IMIN (bvdim, jstart + GB_TILE_SIZE) ; int64_t task_cnvals = 0 ; //---------------------------------------------------------------------- // check if any entry in the M(I,J) mask permits any change to C(I,J) //---------------------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER || GB_MASK_IS_BITMAP_OR_FULL bool any_update_allowed = false ; for (int64_t j = jstart ; j < jend && !any_update_allowed ; j++) { for (int64_t i = istart ; i < iend && !any_update_allowed ; i++) { //---------------------------------------------------------- // get pointer to C(i,j) and M(i,j) //---------------------------------------------------------- int64_t pC = j * avlen + i ; //---------------------------------------------------------- // check M(i,j) //---------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER // M is sparse or hypersparse int8_t cb = Cb [pC] ; bool mij = (cb & 2) ; #elif GB_MASK_IS_BITMAP_OR_FULL // M is bitmap or full GB_GET_M_ij (pC) ; #endif if (Mask_comp) mij = !mij ; if (!mij) continue ; any_update_allowed = true ; } } if (!any_update_allowed) { // C(I,J) cannot be modified at all; skip it continue ; } #endif //---------------------------------------------------------------------- // declare local storage for this task //---------------------------------------------------------------------- // GB_ATYPE Ax_cache [GB_TILE_SIZE * GB_KTILE_SIZE] ; // int8_t Ab_cache [GB_TILE_SIZE * GB_KTILE_SIZE] ; bool Ab_any_in_row [GB_TILE_SIZE] ; //---------------------------------------------------------------------- // C<#M>(I,J) += A(I,:) * B(:,J) //---------------------------------------------------------------------- for (int64_t kstart = 0 ; kstart < avdim ; kstart += GB_KTILE_SIZE) { // K = kstart:kend-1 int64_t kend = GB_IMIN (avdim, kstart + GB_KTILE_SIZE) ; //------------------------------------------------------------------ // TODO: load A(I,K) into local storage //------------------------------------------------------------------ // For built-in semirings, load A(I,K) into local storage of size // GB_TILE_SIZE * GB_KTILE_SIZE and transpose it. Load in the // bitmap Ab if not NULL, and Ax if not NULL. #if 0 for (int64_t k = kstart ; k < kend ; k++) { for (int64_t i = istart ; i < iend ; i++) { int64_t pA = i + k * avlen ; int8_t ab = GBB (Ab, pA) ; i_local = i - istart ; k_local = k - kstart ; Ab_cache [i_local * GB_KTILE_SIZE ... 
} } #endif //------------------------------------------------------------------ // Check for entries in each row of A(I,K) //------------------------------------------------------------------ if (A_is_bitmap) { for (int i = 0 ; i < GB_TILE_SIZE ; i++) { Ab_any_in_row [i] = false ; } for (int64_t k = kstart ; k < kend ; k++) { for (int64_t i = istart ; i < iend ; i++) { int64_t pA = i + k * avlen ; // get pointer to A(i,k) int8_t ab = Ab [pA] ; // Ab_cache [(i-istart) * GB_KTILE_SIZE + (k-kstart)] // = ab ; Ab_any_in_row [i-istart] |= ab ; } } } //------------------------------------------------------------------ // C<#M>(I,J) += A(I,K) * B(K,J) //------------------------------------------------------------------ for (int64_t j = jstart ; j < jend ; j++) { //-------------------------------------------------------------- // B is bitmap or full: check if any B(K,j) entry exists //-------------------------------------------------------------- if (B_is_bitmap) { int b = 0 ; for (int64_t k = kstart ; k < kend ; k++) { int64_t pB = k + j * bvlen ; // pointer to B(k,j) b += Bb [pB] ; } if (b == 0) { // no entry exists in B(K,j) continue ; } } //-------------------------------------------------------------- // C<#M>(I,j) += A(I,K) * B(K,j) //-------------------------------------------------------------- GB_GET_T_FOR_SECONDJ ; for (int64_t i = istart ; i < iend ; i++) { //---------------------------------------------------------- // skip if A(i,K) has no entries //---------------------------------------------------------- if (A_is_bitmap && !Ab_any_in_row [i - istart]) { continue ; } //---------------------------------------------------------- // get C(i,j) //---------------------------------------------------------- int64_t pC = i + j * avlen ; //---------------------------------------------------------- // check M(i,j) //---------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER // M is sparse or hypersparse int8_t cb = Cb [pC] ; bool mij = (cb & 2) ; if (Mask_comp) mij = !mij ; if (!mij) continue ; cb = (cb & 1) ; #elif GB_MASK_IS_BITMAP_OR_FULL // M is bitmap or full GB_GET_M_ij (pC) ; if (Mask_comp) mij = !mij ; if (!mij) continue ; int8_t cb = Cb [pC] ; #else // no mask int8_t cb = Cb [pC] ; #endif //---------------------------------------------------------- // C(i,j) += A(i,K) * B(K,j) //---------------------------------------------------------- if (cb == 0) { //------------------------------------------------------ // C(i,j) does not yet exist //------------------------------------------------------ for (int64_t k = kstart ; k < kend ; k++) { int64_t pA = i + k * avlen ; // pointer to A(i,k) int64_t pB = k + j * bvlen ; // pointer to B(k,j) if (!GBB (Ab, pA)) continue ; if (!GBB (Bb, pB)) continue ; GB_GET_B_kj ; // get B(k,j) GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) if (cb == 0) { // C(i,j) = A(i,k) * B(k,j) GB_CIJ_WRITE (pC, t) ; Cb [pC] = keep ; cb = keep ; task_cnvals++ ; } else { // C(i,j) += A(i,k) * B(k,j) GB_CIJ_UPDATE (pC, t) ; } } } else { //------------------------------------------------------ // C(i,j) already exists //------------------------------------------------------ #if !GB_IS_ANY_PAIR_SEMIRING for (int64_t k = kstart ; k < kend ; k++) { int64_t pA = i + k * avlen ; // pointer to A(i,k) int64_t pB = k + j * bvlen ; // pointer to B(k,j) if (!GBB (Ab, pA)) continue ; if (!GBB (Bb, pB)) continue ; GB_GET_B_kj ; // get B(k,j) GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) // C(i,j) += A(i,k) * B(k,j) GB_CIJ_UPDATE (pC, t) ; } #endif } } } } cnvals += 
task_cnvals ; } }
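//------------------------------------------------------------------------------
// Illustrative sketch (not part of the template above): a minimal, untiled,
// unmasked bitmap saxpy C += A*B using the same column-major indexing, where
// an entry X(i,j) lives at Xx [i + j*nrows] and Xb [i + j*nrows] marks its
// presence.  All names below (bitmap_saxpy_sketch, Cx, Cb, ...) are
// hypothetical, the values are assumed to be double with the (+,*) semiring,
// and <stdint.h> is assumed for int64_t; the real template additionally
// handles masks, K/I/J tiling, and arbitrary semirings via the GB_* macros.
//------------------------------------------------------------------------------

#if 0
static int64_t bitmap_saxpy_sketch
(
    double *Cx, int8_t *Cb,             // C is m-by-n bitmap
    const double *Ax, const int8_t *Ab, // A is m-by-k bitmap
    const double *Bx, const int8_t *Bb, // B is k-by-n bitmap
    int64_t m, int64_t k, int64_t n
)
{
    int64_t cnvals = 0 ;                // # of entries newly added to C
    for (int64_t j = 0 ; j < n ; j++)
    {
        for (int64_t i = 0 ; i < m ; i++)
        {
            int64_t pC = i + j*m ;              // pointer to C(i,j)
            for (int64_t kk = 0 ; kk < k ; kk++)
            {
                int64_t pA = i + kk*m ;         // pointer to A(i,kk)
                int64_t pB = kk + j*k ;         // pointer to B(kk,j)
                if (!Ab [pA] || !Bb [pB]) continue ;
                double t = Ax [pA] * Bx [pB] ;  // t = A(i,kk)*B(kk,j)
                if (Cb [pC] == 0)
                {
                    Cx [pC] = t ;               // C(i,j) = t
                    Cb [pC] = 1 ;
                    cnvals++ ;
                }
                else
                {
                    Cx [pC] += t ;              // C(i,j) += t
                }
            }
        }
    }
    return (cnvals) ;
}
#endif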
testCentralMomentsNormDouble.c
#include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <math.h> #include <omp.h> #ifdef USE_POLAR void polar2x64( int64_t *state, const double mu, const double sigma, double *res ); #else void wichura2x64( int64_t *state, const double mu, const double sigma, double *res ); #endif // parameters // vector width of SIMD unit w.r.t. size of doubles const int vec_width_in_doubles = VECTOR_WIDTH / 8; // number of random numbers to generate // needs to be divisible by vec_width_in_doubles const uint64_t number_random_numbers = 1000ll * 1000ll * 1000ll; // threshold under which to stop recursion const uint64_t recursion_threshold = 1000ll * 1000ll; // derived parameters // number of calls to polar 4x32 to fill vector registers const int rng_calls_per_loop = vec_width_in_doubles / 2; const int loop_iterations = number_random_numbers / vec_width_in_doubles; // compute the non-scaled central moments of local_number_random_numbers // normal random numbers void computeCentralMoments( int64_t *state, const int64_t local_number_random_numbers, const double mu, const double sigma, double *centralMoments ) { // vector for random numbers double r[ vec_width_in_doubles ]; // vector to hold intermediate vector with r^2 double r2[ vec_width_in_doubles ]; // vector to hold intermediate values double intermediate[ vec_width_in_doubles ]; // vectors for central moments double cm02[ vec_width_in_doubles ]; double cm04[ vec_width_in_doubles ]; double cm06[ vec_width_in_doubles ]; double cm08[ vec_width_in_doubles ]; double cm10[ vec_width_in_doubles ]; double cm12[ vec_width_in_doubles ]; double cm14[ vec_width_in_doubles ]; double cm16[ vec_width_in_doubles ]; double cm18[ vec_width_in_doubles ]; double cm20[ vec_width_in_doubles ]; // iteration variable uint64_t i; int j; // initialize central moments to 0 for( j = 0; j < vec_width_in_doubles; j++ ) { cm02[ j ] = 0.; cm04[ j ] = 0.; cm06[ j ] = 0.; cm08[ j ] = 0.; cm10[ j ] = 0.; cm12[ j ] = 0.; cm14[ j ] = 0.; cm16[ j ] = 0.; cm18[ j ] = 0.; cm20[ j ] = 0.; } // compute central moments for( i = 0; i < local_number_random_numbers; i += vec_width_in_doubles ) { // fill r with random numbers for( j = 0; j < rng_calls_per_loop; j++ ) { #ifdef USE_POLAR polar2x64( state, mu, sigma, &r[ j * 2 ] ); #else wichura2x64( state, mu, sigma, &r[ j * 2 ] ); #endif } // iteratively compute central moments #pragma omp simd for( j = 0; j < vec_width_in_doubles; j++ ) { // foundation for central moments r[ j ] = r[ j ] - mu; // always need to multiply by r^2 r2[ j ] = r[ j ] * r[ j ]; // initialize intermediate to r2 intermediate[ j ] = r2[ j ]; // central moments cm02[ j ] += r2[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm04[ j ] += intermediate[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm06[ j ] += intermediate[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm08[ j ] += intermediate[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm10[ j ] += intermediate[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm12[ j ] += intermediate[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm14[ j ] += intermediate[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm16[ j ] += intermediate[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm18[ j ] += intermediate[ j ]; intermediate[ j ] = intermediate[ j ] * r2[ j ]; cm20[ j ] += intermediate[ j ]; } } // compute final central moments for( j = 0; j < 10; j++ ) { centralMoments[ j ] = 0.; } #pragma omp simd for( j = 0; j < vec_width_in_doubles; j++ ) { 
centralMoments[ 0 ] += cm02[ j ]; centralMoments[ 1 ] += cm04[ j ]; centralMoments[ 2 ] += cm06[ j ]; centralMoments[ 3 ] += cm08[ j ]; centralMoments[ 4 ] += cm10[ j ]; centralMoments[ 5 ] += cm12[ j ]; centralMoments[ 6 ] += cm14[ j ]; centralMoments[ 7 ] += cm16[ j ]; centralMoments[ 8 ] += cm18[ j ]; centralMoments[ 9 ] += cm20[ j ]; } } void recursion( int64_t *state, const int64_t local_number_random_numbers, const double mu, const double sigma, double *centralMoments ) { // do we keep on recursing if( local_number_random_numbers <= recursion_threshold ) { // no, call function directly computeCentralMoments( state, local_number_random_numbers, mu, sigma, centralMoments ); } else { // yes double centralMomentsLeft[ 10 ]; double centralMomentsRight[ 10 ]; // init left branch recursion( state, local_number_random_numbers / 2ll, mu, sigma, centralMomentsLeft ); // init right branch recursion( state, local_number_random_numbers / 2ll, mu, sigma, centralMomentsRight ); // compute sum of both branches int j; for( j = 0; j < 10; j++ ) { centralMoments[ j ] = centralMomentsLeft[ j ] + centralMomentsRight[ j ]; } } return; } int main() { // state for RNG int64_t state[ 4 ]; // parameters of normal distribution const double mu = 0.; const double sigma = 1.; // double factorial long double_fac = 1; // timing double startTime, stopTime; // final central moments double final_cm[ 10 ]; // relative error double relative_error; // iteration variables int j; // store number of threads int num_threads; printf( "Run test testCentralMomentsNormDouble\n\n" ); // timing startTime = omp_get_wtime(); // init the rng state[ 0 ] = 0; state[ 1 ] = 0; state[ 2 ] = 0; state[ 3 ] = 0; // compute central moments recursion( state, number_random_numbers, mu, sigma, final_cm ); // timing stopTime = omp_get_wtime(); printf( "Timing: %es\n\n", stopTime - startTime ); // print out results for( j = 0; j < 10; j++ ) { double_fac = double_fac * ( 2 * j + 1 ); final_cm[ j ] = final_cm[ j ] / number_random_numbers; relative_error = fabs( final_cm[ j ] - (double)double_fac ) / (double)double_fac; if( j < 4 ) { if( relative_error > 1e-3 ) { printf( "Error!\n" ); printf( "%02dth numerical central moment: %e, exact central moment: %9ld, relative error: %e\n", j * 2 + 2, final_cm[ j ], double_fac, fabs( final_cm[ j ] - (double)double_fac ) / (double)double_fac ); exit( 1 ); } } else if( j < 8 ) { if( relative_error > 1e-2 ) { printf( "Error!\n" ); printf( "%02dth numerical central moment: %e, exact central moment: %9ld, relative error: %e\n", j * 2 + 2, final_cm[ j ], double_fac, fabs( final_cm[ j ] - (double)double_fac ) / (double)double_fac ); exit( 1 ); } } else { if( relative_error > 2e-2 ) { printf( "Error!\n" ); printf( "%02dth numerical central moment: %e, exact central moment: %9ld, relative error: %e\n", j * 2 + 2, final_cm[ j ], double_fac, fabs( final_cm[ j ] - (double)double_fac ) / (double)double_fac ); exit( 1 ); } } } return 0; }
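/*
 * Illustrative helper (not used by the test above): the exact even central
 * moments of a normal distribution are E[(X - mu)^(2k)] = sigma^(2k) * (2k-1)!!,
 * which is exactly the double factorial main() accumulates in double_fac for
 * sigma = 1. The function below is a hypothetical sketch that fills the ten
 * reference values the test compares against; it is not part of the original
 * file.
 */
static void exactNormalCentralMoments( const double sigma, double *reference )
{
    double double_fac = 1.0;            /* (2k-1)!! for k = 1 */
    double sigma_pow = sigma * sigma;   /* sigma^(2k) for k = 1 */
    int k;
    for( k = 0; k < 10; k++ ) {
        double_fac = double_fac * ( 2 * k + 1 );  /* 1, 3, 15, 105, ... */
        reference[ k ] = sigma_pow * double_fac;  /* 2nd, 4th, ..., 20th central moment */
        sigma_pow = sigma_pow * sigma * sigma;    /* advance to sigma^(2k+2) */
    }
}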
nvptx_param_translate.c
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s // expected-no-diagnostics // CHECK: [[MAP_FN:%.+]] = load void (i8*, ...)*, void (i8*, ...)** % // CHECK: [[FN:%.+]] = bitcast void (i8*, ...)* [[MAP_FN]] to void (i8*, // CHECK: call void [[FN]](i8* % int main() { double a, b; #pragma omp target map(tofrom \ : a) map(to \ : b) { #pragma omp taskgroup #pragma omp task shared(a) a = b; } return 0; }
whirlpool_fmt_plug.c
/* whirlpool cracker patch for JtR. Hacked together during April of 2013 by Dhiru * Kholia <dhiru at openwall.com>. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_whirlpool_0; extern struct fmt_main fmt_whirlpool_1; extern struct fmt_main fmt_whirlpool; #elif FMT_REGISTERS_H john_register_one(&fmt_whirlpool_0); john_register_one(&fmt_whirlpool_1); john_register_one(&fmt_whirlpool); #else #include <string.h> #include "arch.h" #include "openssl_local_overrides.h" #include "sph_whirlpool.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include <openssl/opensslv.h> #if (AC_BUILT && HAVE_WHIRLPOOL) || \ (!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL) #include <openssl/whrlpool.h> #endif #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 256 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "Whirpool" #define FORMAT_NAME "" #define FORMAT_TAG "$whirlpool$" #define TAG_LENGTH 11 #define ALGORITHM_NAME "32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 128 #define BINARY_SIZE 64 #define SALT_SIZE 0 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_ALIGN 4 #define SALT_ALIGN 1 static struct fmt_tests whirlpool_0_tests[] = { {"B3E1AB6EAF640A34F784593F2074416ACCD3B8E62C620175FCA0997B1BA2347339AA0D79E754C308209EA36811DFA40C1C32F1A2B9004725D987D3635165D3C8", ""}, // repeat hash in exactly the same form that is used in john.pot {FORMAT_TAG "B3E1AB6EAF640A34F784593F2074416ACCD3B8E62C620175FCA0997B1BA2347339AA0D79E754C308209EA36811DFA40C1C32F1A2B9004725D987D3635165D3C8", ""}, {NULL} }; static struct fmt_tests whirlpool_1_tests[] = { {"470F0409ABAA446E49667D4EBE12A14387CEDBD10DD17B8243CAD550A089DC0FEEA7AA40F6C2AAAB71C6EBD076E43C7CFCA0AD32567897DCB5969861049A0F5A", ""}, // repeat hash in exactly the same form that is used in john.pot {FORMAT_TAG "470F0409ABAA446E49667D4EBE12A14387CEDBD10DD17B8243CAD550A089DC0FEEA7AA40F6C2AAAB71C6EBD076E43C7CFCA0AD32567897DCB5969861049A0F5A", ""}, {NULL} }; static struct fmt_tests whirlpool_tests[] = { {"19FA61D75522A4669B44E39C1D2E1726C530232130D407F89AFEE0964997F7A73E83BE698B288FEBCF88E3E03C4F0757EA8964E59B63D93708B138CC42A66EB3", ""}, // repeat hash in exactly the same form that is used in john.pot {FORMAT_TAG "19FA61D75522A4669B44E39C1D2E1726C530232130D407F89AFEE0964997F7A73E83BE698B288FEBCF88E3E03C4F0757EA8964E59B63D93708B138CC42A66EB3", ""}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p; p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; if (hexlen(p) != CIPHERTEXT_LENGTH) return 0; return 1; } static 
char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1]; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); strupr(out + TAG_LENGTH); return out; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = strrchr(ciphertext, '$') + 1; else p = ciphertext; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int crypt_0(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { sph_whirlpool0_context ctx; sph_whirlpool0_init(&ctx); sph_whirlpool0(&ctx, saved_key[index], strlen(saved_key[index])); sph_whirlpool0_close(&ctx, (unsigned char*)crypt_out[index]); } return count; } static int crypt_1(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { sph_whirlpool1_context ctx; sph_whirlpool1_init(&ctx); sph_whirlpool1(&ctx, saved_key[index], strlen(saved_key[index])); sph_whirlpool1_close(&ctx, (unsigned char*)crypt_out[index]); } return count; } static int crypt_2(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { #if (AC_BUILT && HAVE_WHIRLPOOL) || \ (!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL) WHIRLPOOL_CTX ctx; WHIRLPOOL_Init(&ctx); WHIRLPOOL_Update(&ctx, saved_key[index], strlen(saved_key[index])); WHIRLPOOL_Final((unsigned char*)crypt_out[index], &ctx); #else sph_whirlpool_context ctx; sph_whirlpool_init(&ctx); sph_whirlpool(&ctx, saved_key[index], strlen(saved_key[index])); sph_whirlpool_close(&ctx, (unsigned char*)crypt_out[index]); #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void whirlpool_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_whirlpool_0 = { { "whirlpool0", "", "WHIRLPOOL-0 " ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, 
MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE, { NULL }, whirlpool_0_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, whirlpool_set_key, get_key, fmt_default_clear_keys, crypt_0, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_whirlpool_1 = { { "whirlpool1", "", "WHIRLPOOL-1 " ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE, { NULL }, whirlpool_1_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, whirlpool_set_key, get_key, fmt_default_clear_keys, crypt_1, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_whirlpool = { { "whirlpool", "", "WHIRLPOOL " ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE, { NULL }, whirlpool_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, whirlpool_set_key, get_key, fmt_default_clear_keys, crypt_2, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
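/*
 * Illustrative sketch (not part of the format): how a raw WHIRLPOOL digest is
 * produced with the bundled sph API, mirroring what crypt_2() does per
 * candidate above. Guarded out with #if 0 so it never affects the plugin
 * build; whirlpool_hex_sketch is a hypothetical name used only here.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include "sph_whirlpool.h"

static void whirlpool_hex_sketch(const char *key)
{
	unsigned char digest[64];   /* 512-bit WHIRLPOOL output, see BINARY_SIZE */
	int i;
	sph_whirlpool_context ctx;

	sph_whirlpool_init(&ctx);
	sph_whirlpool(&ctx, key, strlen(key));
	sph_whirlpool_close(&ctx, digest);

	/* print in the same uppercase hex form used by the test vectors above */
	for (i = 0; i < 64; i++)
		printf("%02X", digest[i]);
	printf("\n");
}
#endif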
libperf_int.h
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2015. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2016. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifndef LIBPERF_INT_H_ #define LIBPERF_INT_H_ #include <tools/perf/api/libperf.h> BEGIN_C_DECLS /** @file libperf_int.h */ #include <ucs/async/async.h> #include <ucs/time/time.h> #include <ucs/sys/math.h> #if _OPENMP #include <omp.h> #endif #define TIMING_QUEUE_SIZE 2048 #define UCT_PERF_TEST_AM_ID 5 #define ADDR_BUF_SIZE 2048 #define EXTRA_INFO_SIZE 256 #define UCX_PERF_TEST_FOREACH(perf) \ while (!ucx_perf_context_done(perf)) #define rte_call(_perf, _func, ...) \ ((_perf)->params.rte->_func((_perf)->params.rte_group, ## __VA_ARGS__)) typedef struct ucx_perf_context ucx_perf_context_t; typedef struct uct_peer uct_peer_t; typedef struct ucp_perf_request ucp_perf_request_t; typedef struct ucx_perf_thread_context ucx_perf_thread_context_t; struct ucx_perf_allocator { ucs_memory_type_t mem_type; ucs_status_t (*init)(ucx_perf_context_t *perf); ucs_status_t (*uct_alloc)(const ucx_perf_context_t *perf, size_t length, unsigned flags, uct_allocated_memory_t *alloc_mem); void (*uct_free)(const ucx_perf_context_t *perf, uct_allocated_memory_t *alloc_mem); void (*memcpy)(void *dst, ucs_memory_type_t dst_mem_type, const void *src, ucs_memory_type_t src_mem_type, size_t count); void* (*memset)(void *dst, int value, size_t count); }; struct ucx_perf_context { ucx_perf_params_t params; /* Buffers */ void *send_buffer; void *recv_buffer; /* Measurements */ double start_time_acc; /* accurate start time */ ucs_time_t end_time; /* inaccurate end time (upper bound) */ ucs_time_t prev_time; /* time of previous iteration */ ucs_time_t report_interval; /* interval of showing report */ ucx_perf_counter_t max_iter; /* Measurements of current/previous **report** */ struct { ucx_perf_counter_t msgs; /* number of messages */ ucx_perf_counter_t bytes; /* number of bytes */ ucx_perf_counter_t iters; /* number of iterations */ ucs_time_t time; /* inaccurate time (for median and report interval) */ double time_acc; /* accurate time (for avg latency/bw/msgrate) */ } current, prev; ucs_time_t timing_queue[TIMING_QUEUE_SIZE]; unsigned timing_queue_head; const ucx_perf_allocator_t *send_allocator; const ucx_perf_allocator_t *recv_allocator; char extra_info[EXTRA_INFO_SIZE]; union { struct { ucs_async_context_t async; uct_component_h cmpt; uct_md_h md; uct_worker_h worker; uct_iface_h iface; uct_peer_t *peers; uct_allocated_memory_t send_mem; uct_allocated_memory_t recv_mem; uct_iov_t *iov; } uct; struct { ucp_context_h context; ucx_perf_thread_context_t* tctx; ucp_worker_h worker; ucp_ep_h ep; ucp_rkey_h rkey; unsigned long remote_addr; ucp_mem_h send_memh; ucp_mem_h recv_memh; ucp_dt_iov_t *send_iov; ucp_dt_iov_t *recv_iov; void *am_hdr; } ucp; }; }; struct ucx_perf_thread_context { pthread_t pt; int tid; ucs_status_t status; ucx_perf_context_t perf; ucx_perf_result_t result; }; struct uct_peer { uct_ep_h ep; unsigned long remote_addr; uct_rkey_bundle_t rkey; }; struct ucp_perf_request { void *context; }; typedef struct { ucs_status_t (*setup)(ucx_perf_context_t *perf); void (*cleanup)(ucx_perf_context_t *perf); ucs_status_t (*run)(ucx_perf_context_t *perf); void (*barrier)(ucx_perf_context_t *perf); } ucx_perf_funcs_t; extern ucx_perf_funcs_t ucx_perf_funcs[]; unsigned rte_peer_index(unsigned group_size, unsigned group_index); void ucx_perf_test_start_clock(ucx_perf_context_t *perf); 
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index); void uct_perf_iface_flush_b(ucx_perf_context_t *perf); ucs_status_t uct_perf_test_dispatch(ucx_perf_context_t *perf); ucs_status_t ucp_perf_test_dispatch(ucx_perf_context_t *perf); void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result); void uct_perf_barrier(ucx_perf_context_t *perf); void ucp_perf_thread_barrier(ucx_perf_context_t *perf); void ucp_perf_barrier(ucx_perf_context_t *perf); ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf); void ucp_perf_test_free_mem(ucx_perf_context_t *perf); ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf); void uct_perf_test_free_mem(ucx_perf_context_t *perf); ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf, ucx_perf_result_t* result); void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf, const ucx_perf_params_t *params); void ucx_perf_set_warmup(ucx_perf_context_t* perf, const ucx_perf_params_t* params); /** * Get the total length of the message size given by parameters */ size_t ucx_perf_get_message_size(const ucx_perf_params_t *params); void ucx_perf_report(ucx_perf_context_t *perf); static UCS_F_ALWAYS_INLINE int ucx_perf_context_done(ucx_perf_context_t *perf) { return ucs_unlikely((perf->current.iters >= perf->max_iter) || (perf->current.time > perf->end_time)); } static inline void ucx_perf_get_time(ucx_perf_context_t *perf) { perf->current.time_acc = ucs_get_accurate_time(); } static inline void ucx_perf_omp_barrier(ucx_perf_context_t *perf) { #if _OPENMP if (perf->params.thread_count > 1) { #pragma omp barrier } #endif } static UCS_F_ALWAYS_INLINE void ucx_perf_update(ucx_perf_context_t *perf, ucx_perf_counter_t iters, size_t bytes) { perf->current.time = ucs_get_time(); perf->current.iters += iters; perf->current.bytes += bytes; perf->current.msgs += 1; perf->timing_queue[perf->timing_queue_head] = perf->current.time - perf->prev_time; ++perf->timing_queue_head; if (perf->timing_queue_head == TIMING_QUEUE_SIZE) { perf->timing_queue_head = 0; } perf->prev_time = perf->current.time; if (ucs_unlikely((perf->current.time - perf->prev.time) >= perf->report_interval)) { ucx_perf_report(perf); } } END_C_DECLS #endif
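/*
 * Illustrative sketch (not part of the header): the per-iteration latency
 * recording done by ucx_perf_update() above is a fixed-size circular buffer
 * of TIMING_QUEUE_SIZE samples. The standalone version below uses
 * hypothetical names (lat_queue_t, lat_queue_push) and plain uint64_t
 * timestamps instead of ucs_time_t, and is guarded out of the build.
 */
#if 0
#include <stdint.h>

typedef struct {
    uint64_t samples[TIMING_QUEUE_SIZE]; /* most recent per-iteration latencies */
    unsigned head;                       /* next slot to overwrite */
    uint64_t prev_time;                  /* timestamp of the previous iteration */
} lat_queue_t;

static inline void lat_queue_push(lat_queue_t *q, uint64_t now)
{
    q->samples[q->head] = now - q->prev_time; /* latency of this iteration */
    if (++q->head == TIMING_QUEUE_SIZE) {
        q->head = 0;                          /* wrap around; oldest sample dropped */
    }
    q->prev_time = now;
}
#endif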
SceneGraphConverterOCC.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #include <unordered_set> #include <osg/Material> #include <osg/Geode> #include <osg/CullFace> #include <osg/Point> #include <osg/Switch> #include <osgText/Text> #include <BRepAdaptor_Curve.hxx> #include <BRep_Tool.hxx> #include <BRepMesh_IncrementalMesh.hxx> #include <GCPnts_AbscissaPoint.hxx> #include <GCPnts_UniformAbscissa.hxx> #include <Geom_Line.hxx> #include <Poly.hxx> #include <TopExp.hxx> #include <TopExp_Explorer.hxx> #include <TopoDS.hxx> #include <TopoDS_Edge.hxx> #include <TopoDS_Shape.hxx> #include <TopoDS_Vertex.hxx> #include <ifcpp/geometry/GeometrySettings.h> #include <ifcpp/geometry/SceneGraphUtils.h> #include <ifcpp/model/BuildingModel.h> #include <ifcpp/model/BasicTypes.h> #include <ifcpp/model/StatusCallback.h> #include <ifcpp/IFC4/include/IfcCurtainWall.h> #include <ifcpp/IFC4/include/IfcFeatureElementSubtraction.h> #include <ifcpp/IFC4/include/IfcProject.h> #include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h> #include <ifcpp/IFC4/include/IfcRelAggregates.h> #include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h> #include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h> #include <ifcpp/IFC4/include/IfcSpace.h> #include <ifcpp/IFC4/include/IfcWindow.h> #include "GeometryInputDataOCC.h" class ScenegraphConverterOCC : public StatusCallback { protected: std::map<int, osg::ref_ptr<osg::Switch> > m_map_entity_id_to_switch; // Map: IfcProduct ID -> scenegraph switch std::map<int, osg::ref_ptr<osg::Switch> > m_map_representation_to_switch; // Map: Representation identifier -> scenegraph switch shared_ptr<GeometrySettings> m_geom_settings; double m_recent_progress = 0; osg::ref_ptr<osg::CullFace> m_cull_back_off; osg::ref_ptr<osg::StateSet> m_glass_stateset; //\brief StateSet caching and re-use std::vector<osg::ref_ptr<osg::StateSet> > m_vec_existing_statesets; bool m_enable_stateset_caching = false; #ifdef ENABLE_OPENMP Mutex m_writelock_appearance_cache; #endif public: ScenegraphConverterOCC( shared_ptr<GeometrySettings>& geom_settings ) : m_geom_settings( geom_settings ) { m_cull_back_off = new osg::CullFace( osg::CullFace::BACK ); m_glass_stateset = new osg::StateSet(); m_glass_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); m_glass_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } virtual ~ScenegraphConverterOCC() {} // after calling convertToOSG, the OSG Switches are in the map returned by this method const std::map<int, 
osg::ref_ptr<osg::Switch> >& getMapIdSwitch() { return m_map_entity_id_to_switch; } struct RenderOptions { RenderOptions(){} RenderOptions( osg::Vec4f color, double distance_between_points_in_mm = 0.5, bool create_points_along_straight_line = false ) { m_color = color; m_color_set = true; m_distance_between_points_in_mm = distance_between_points_in_mm; m_create_points_along_straight_line = create_points_along_straight_line; } osg::Vec4f m_color; bool m_color_set = false; double m_distance_between_points_in_mm = 0.5; bool m_create_points_along_straight_line = false; }; void clearInputCache() { m_map_entity_id_to_switch.clear(); m_map_representation_to_switch.clear(); m_vec_existing_statesets.clear(); } static void getEdgePoints( const TopoDS_Edge& edge, osg::Vec3Array* vertices, const RenderOptions& render_options ) { Standard_Real first = 0; Standard_Real last = 1; Handle( Geom_Curve ) c = BRep_Tool::Curve( edge, first, last ); bool discretize_points_on_straight_line = render_options.m_create_points_along_straight_line; if( c->DynamicType() == STANDARD_TYPE( Geom_Line ) && !discretize_points_on_straight_line ) { // just straight line const TopoDS_Vertex& v1 = TopExp::FirstVertex( edge ); const TopoDS_Vertex& v2 = TopExp::LastVertex( edge ); gp_Pnt point1 = BRep_Tool::Pnt( v1 ); gp_Pnt point2 = BRep_Tool::Pnt( v2 ); vertices->push_back( osg::Vec3d( point1.X(), point1.Y(), point1.Z() ) ); vertices->push_back( osg::Vec3d( point2.X(), point2.Y(), point2.Z() ) ); } else { double param_range = last - first; BRepAdaptor_Curve curve_adaptor(edge); //curve_adaptor.Initialize( edge ); #ifdef _DEBUG const TopoDS_Vertex& v1 = TopExp::FirstVertex( edge ); const TopoDS_Vertex& v2 = TopExp::LastVertex( edge ); gp_Pnt point1 = BRep_Tool::Pnt( v1 ); gp_Pnt point2 = BRep_Tool::Pnt( v2 ); #endif Standard_Real length_of_edge = GCPnts_AbscissaPoint::Length( curve_adaptor ); double distance = render_options.m_distance_between_points_in_mm; double num_points = 40*param_range/(2.0*M_PI); distance = length_of_edge/num_points; GCPnts_UniformAbscissa uniform_abscissa; uniform_abscissa.Initialize( curve_adaptor, distance ); if( uniform_abscissa.IsDone() ) { int nb_points = uniform_abscissa.NbPoints(); for( int i = 0; i < nb_points; ++i ) { Standard_Real parameter = uniform_abscissa.Parameter( i + 1 ); gp_Pnt pnt = curve_adaptor.Value( parameter ); vertices->push_back( osg::Vec3d( pnt.X(), pnt.Y(), pnt.Z() ) ); if( i > 0 && i < nb_points - 1 ) { vertices->push_back( osg::Vec3d( pnt.X(), pnt.Y(), pnt.Z() ) ); } } if( vertices->size()> 0 ) { if( vertices->size()%2 != 0 ) { vertices->push_back( vertices->back() ); } } } } } static void drawShape( const TopoDS_Shape& shape, osg::Geode* parent_geode, const RenderOptions& render_options ) { if( shape.IsNull() ) { return; } osg::ref_ptr<osg::Vec3Array> vertices_lines = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_tri_storage = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_tri = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> normals_tri = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> normals_tri_storage = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_quad; osg::ref_ptr<osg::Vec3Array> normals_quad; #ifdef _DEBUG osg::ref_ptr<osg::Vec3Array> vertices_triangle_edges = new osg::Vec3Array(); #endif TopAbs_ShapeEnum shape_type = shape.ShapeType(); if( shape_type == TopAbs_WIRE || shape_type == TopAbs_EDGE || shape_type == TopAbs_VERTEX ) { TopExp_Explorer Ex; for( Ex.Init( shape, TopAbs_EDGE ); Ex.More(); Ex.Next() ) { 
TopoDS_Edge edge = TopoDS::Edge( Ex.Current() ); getEdgePoints( edge, vertices_lines, render_options ); } } else { Standard_Real linear_tolerance = 0.06*0.001; // for [m] Standard_Real angular_tolerance = 0.5; bool is_relative = false; BRepMesh_IncrementalMesh incremental_mesh( shape, linear_tolerance, is_relative, angular_tolerance ); TopExp_Explorer shape_explorer( shape, TopAbs_FACE ); for( ; shape_explorer.More(); shape_explorer.Next() ) { const TopoDS_Face& face = TopoDS::Face( shape_explorer.Current() ); TopLoc_Location L = TopLoc_Location(); const Handle( Poly_Triangulation )& poly_triangulation = BRep_Tool::Triangulation( face, L ); if( poly_triangulation.IsNull() ) { continue; } const gp_Trsf & face_trsf = L.Transformation(); Poly::ComputeNormals( poly_triangulation ); const TColgp_Array1OfPnt& triang_vertices = poly_triangulation->Nodes(); const TShort_Array1OfShortReal& triang_normals = poly_triangulation->Normals(); const Poly_Array1OfTriangle& triangles = poly_triangulation->Triangles(); // Number of nodes in the triangulation int num_vertices = poly_triangulation->Nodes().Length(); if( num_vertices*3 != triang_normals.Length() ) { std::cout << "Different number of normals and vertices\n"; return; } if( !vertices_tri_storage ) { vertices_tri_storage = new osg::Vec3Array(); } size_t offset_vertex_storage = vertices_tri_storage->size(); if( !normals_tri_storage ) { normals_tri_storage = new osg::Vec3Array(); } //size_t offset_normals_storage = normals_tri_storage->size(); // Get each vertex index, checking common vertices between shapes for( int i = 0; i < num_vertices; i++ ) { gp_Pnt triang_point = triang_vertices.Value( i+1 ); gp_Vec normal( triang_normals.Value( i*3 + 1 ), triang_normals.Value( i*3 + 2 ), triang_normals.Value( i*3 + 3 ) ); if( face_trsf.Form() != gp_Identity ) { triang_point.Transform( face_trsf ); normal.Transform( face_trsf ); } double x = std::round( triang_point.X()*10.0 )*0.1; double y = std::round( triang_point.Y()*10.0 )*0.1; double z = std::round( triang_point.Z()*10.0 )*0.1; vertices_tri_storage->push_back( osg::Vec3d( x, y, z ) ); normals_tri_storage->push_back( osg::Vec3d( normal.X(), normal.Y(), normal.Z() ) ); } if( !vertices_tri ) { vertices_tri = new osg::Vec3Array(); } if( !normals_tri ) { normals_tri = new osg::Vec3Array(); } int num_stored_vertices = vertices_tri_storage->size(); for( auto it = triangles.begin(); it != triangles.end(); ++it ) { const Poly_Triangle& triang = *it; int idx_tri1, idx_tri2, idx_tri3; triang.Get( idx_tri1, idx_tri2, idx_tri3 ); int idx1 = offset_vertex_storage + idx_tri1 - 1; int idx2 = offset_vertex_storage + idx_tri2 - 1; int idx3 = offset_vertex_storage + idx_tri3 - 1; if( idx1 >= num_stored_vertices || idx2 >= num_stored_vertices || idx3 >= num_stored_vertices ) { std::cout << "idx > num_stored_vertices" << std::endl; continue; } osg::Vec3 v1 = vertices_tri_storage->at( idx1 ); osg::Vec3 v2 = vertices_tri_storage->at( idx2 ); osg::Vec3 v3 = vertices_tri_storage->at( idx3 ); vertices_tri->push_back( v1 ); vertices_tri->push_back( v2 ); vertices_tri->push_back( v3 ); osg::Vec3 n1 = normals_tri_storage->at( idx1 ); osg::Vec3 n2 = normals_tri_storage->at( idx2 ); osg::Vec3 n3 = normals_tri_storage->at( idx3 ); normals_tri->push_back( n1 ); normals_tri->push_back( n2 ); normals_tri->push_back( n3 ); } } } if( vertices_tri->size() > 0 ) { if( vertices_tri->size() == normals_tri->size() ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_tri ); geometry->setNormalArray( 
normals_tri ); normals_tri->setBinding( osg::Array::BIND_PER_VERTEX ); if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::TRIANGLES, 0, vertices_tri->size() ) ); parent_geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < vertices_tri->size(); ++i ) { osg::Vec3f& vertex_vec = vertices_tri->at( i );// [i]; osg::Vec3f& normal_vec = normals_tri->at( i ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) + normal_vec ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( vertices_normals->size(), osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); colors_normals->setBinding( osg::Array::BIND_PER_VERTEX ); geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); parent_geode->addDrawable( geometry_normals ); #endif } } if( vertices_quad ) { if( vertices_quad->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_quad ); if( normals_quad ) { normals_quad->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setNormalArray( normals_quad ); } if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::QUADS, 0, vertices_quad->size() ) ); parent_geode->addDrawable( geometry ); } } if( vertices_lines->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_lines ); if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_lines->size() ) ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); parent_geode->addDrawable( geometry ); } #ifdef _DEBUG if( vertices_triangle_edges->size() > 0 && false ) { { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_triangle_edges ); osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->resize( vertices_triangle_edges->size(), osg::Vec4f( 0.6f, 0.7f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_triangle_edges->size() ) ); parent_geode->addDrawable( geometry ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); } } #endif } void applyAppearancesToGroup( const std::vector<shared_ptr<AppearanceData> >& 
vec_product_appearances, osg::Group* grp ) { for( size_t ii = 0; ii < vec_product_appearances.size(); ++ii ) { const shared_ptr<AppearanceData>& appearance = vec_product_appearances[ii]; if( !appearance ) { continue; } AppearanceData::GeometryTypeEnum geom_type = appearance->m_apply_to_geometry_type; if( geom_type == AppearanceData::GEOM_TYPE_SURFACE || geom_type == AppearanceData::GEOM_TYPE_ANY ) { osg::StateSet* item_stateset = convertToOSGStateSet( appearance ); if( item_stateset != nullptr ) { osg::StateSet* existing_item_stateset = grp->getStateSet(); if( existing_item_stateset ) { if( existing_item_stateset != item_stateset ) { existing_item_stateset->merge( *item_stateset ); } } else { grp->setStateSet( item_stateset ); } } } else if( geom_type == AppearanceData::GEOM_TYPE_CURVE ) { //osg::Vec4f color_lines( appearance->m_color_ambient.m_r, appearance->m_color_ambient.m_g, appearance->m_color_ambient.m_b, appearance->m_color_ambient.m_a ); //GeomUtils::setColorToLines( grp, color_lines ); } } } //\brief method convertProductShapeToOSG: creates geometry objects from an IfcProduct object // caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock void convertProductShapeToOSG( shared_ptr<ProductShapeDataOCC>& product_shape, std::map<int, osg::ref_ptr<osg::Switch> >& map_representation_switches ) { if( product_shape->m_ifc_object_definition.expired() ) { return; } RenderOptions render_options; shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition ); shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { return; } const int product_id = ifc_product->m_entity_id; std::stringstream strs_product_switch_name; strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group"; // create OSG objects std::vector<shared_ptr<RepresentationDataOCC> >& vec_product_representations = product_shape->m_vec_representations; for( size_t ii_representation = 0; ii_representation < vec_product_representations.size(); ++ii_representation ) { const shared_ptr<RepresentationDataOCC>& product_representation_data = vec_product_representations[ii_representation]; if( product_representation_data->m_ifc_representation.expired() ) { continue; } shared_ptr<IfcRepresentation> ifc_representation( product_representation_data->m_ifc_representation ); const int representation_id = ifc_representation->m_entity_id; osg::ref_ptr<osg::Switch> representation_switch = new osg::Switch(); #ifdef _DEBUG std::stringstream strs_representation_name; strs_representation_name << strs_product_switch_name.str().c_str() << ", representation " << ii_representation; representation_switch->setName( strs_representation_name.str().c_str() ); #endif const std::vector<shared_ptr<ItemShapeDataOCC> >& product_items = product_representation_data->m_vec_item_data; for( size_t i_item = 0; i_item < product_items.size(); ++i_item ) { const shared_ptr<ItemShapeDataOCC>& item_input_data = product_items[i_item]; osg::ref_ptr<osg::Group> item_group = new osg::Group(); if( !item_group ) { throw OutOfMemoryException( __FUNC__ ); } #ifdef _DEBUG std::stringstream strs_item_name; strs_item_name << strs_representation_name.str().c_str() << ", item " << i_item; item_group->setName( strs_item_name.str().c_str() ); #endif // create shape for open shells for( size_t ii_shapes = 0; ii_shapes < item_input_data->getShapes().size(); ++ii_shapes ) { const TopoDS_Shape& item_shape = 
item_input_data->getShapes()[ii_shapes]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } drawShape( item_shape, geode, render_options ); // disable back face culling for open meshes geode->getOrCreateStateSet()->setAttributeAndModes( m_cull_back_off.get(), osg::StateAttribute::OFF ); if( geode->getNumDrawables() > 0 ) { item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", open shape " << ii_shapes; geode->setName( strs_item_shape_name.str().c_str() ); #endif } } // create shape for points const std::vector<TopoDS_Vertex>& vertex_points = item_input_data->getVertexPoints(); if( vertex_points.size() > 0 ) { osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t ii_vertex_point = 0; ii_vertex_point < vertex_points.size(); ++ii_vertex_point ) { const TopoDS_Vertex& vertex_input = vertex_points[ii_vertex_point]; if( !vertex_input.IsNull() ) { gp_Pnt point1 = BRep_Tool::Pnt( vertex_input ); vertices->push_back( osg::Vec3d( point1.X(), point1.Y(), point1.Z() ) ); } } if( vertices->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POINTS, 0, vertices->size() ) ); geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geode->getOrCreateStateSet()->setAttribute( new osg::Point( 3.0f ), osg::StateAttribute::ON ); geode->addDrawable( geometry ); geode->setCullingActive( false ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", vertex_point "; geode->setName( strs_item_shape_name.str().c_str() ); #endif } else { std::cout << __FUNC__ << ": unexpected vertices->size() == 0" << std::endl; } } // create shape for polylines for( size_t ii_shapes = 0; ii_shapes < item_input_data->getPolylines().size(); ++ii_shapes ) { const TopoDS_Wire& polyline_data = item_input_data->getPolylines()[ii_shapes]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); RenderOptions render_options_polyline; if( item_input_data->getAppearances().size() > 0 ) { for( size_t ii_appearances = 0; ii_appearances < item_input_data->getAppearances().size(); ++ii_appearances ) { const shared_ptr<AppearanceData>& appearance = item_input_data->getAppearances()[ii_appearances]; if( !appearance ) { continue; } if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_CURVE || appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_ANY ) { osg::Vec4f color_lines( appearance->m_color_ambient.m_r, appearance->m_color_ambient.m_g, appearance->m_color_ambient.m_b, appearance->m_color_ambient.m_a ); render_options_polyline.m_color = color_lines; render_options_polyline.m_color_set = true; break; } } } drawShape( polyline_data, geode, render_options_polyline ); if( geode->getNumDrawables() > 0 ) { item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", polylines " << ii_shapes; geode->setName( strs_item_shape_name.str().c_str() ); #endif } } if( m_geom_settings->isShowTextLiterals() ) { for( 
size_t ii = 0; ii < item_input_data->getTextItems().size(); ++ii ) { const shared_ptr<TextItemDataOCC>& text_data = item_input_data->getTextItems()[ii]; if( !text_data ) { continue; } gp_Trsf& text_pos = text_data->m_text_position; // TODO: handle rotation std::string text_str; text_str.assign( text_data->m_text.begin(), text_data->m_text.end() ); gp_XYZ pos_translation = text_pos.TranslationPart(); osg::Vec3 pos2( pos_translation.X(), pos_translation.Y(), pos_translation.Z() );// text_pos._41, text_pos._42, text_pos._43 ); osg::ref_ptr<osgText::Text> txt = new osgText::Text(); if( !txt ) { throw OutOfMemoryException( __FUNC__ ); } txt->setFont( "fonts/arial.ttf" ); txt->setColor( osg::Vec4f( 0, 0, 0, 1 ) ); txt->setCharacterSize( 0.1f ); txt->setAutoRotateToScreen( true ); txt->setPosition( pos2 ); txt->setText( text_str.c_str() ); txt->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->addDrawable( txt ); item_group->addChild( geode ); } } // apply statesets if there are any if( item_input_data->getAppearances().size() > 0 ) { applyAppearancesToGroup( item_input_data->getAppearances(), item_group ); } // If anything has been created, add it to the representation group if( item_group->getNumChildren() > 0 ) { #ifdef _DEBUG if( item_group->getNumParents() > 0 ) { std::cout << __FUNC__ << ": item_group->getNumParents() > 0" << std::endl; } #endif representation_switch->addChild( item_group ); } } // apply statesets if there are any if( product_representation_data->m_vec_representation_appearances.size() > 0 ) { applyAppearancesToGroup( product_representation_data->m_vec_representation_appearances, representation_switch ); } // If anything has been created, add it to the product group if( representation_switch->getNumChildren() > 0 ) { #ifdef _DEBUG if( representation_switch->getNumParents() > 0 ) { std::cout << __FUNC__ << ": product_representation_switch->getNumParents() > 0" << std::endl; } #endif // enable transparency for certain objects if( dynamic_pointer_cast<IfcSpace>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); } else if( dynamic_pointer_cast<IfcCurtainWall>(ifc_product) || dynamic_pointer_cast<IfcWindow>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); SceneGraphUtils::setMaterialAlpha( representation_switch, 0.6f, true ); } } map_representation_switches.insert( std::make_pair( representation_id, representation_switch ) ); } // TODO: if no color or material is given, set color 231/219/169 for walls, 140/140/140 for slabs } /*\brief method convertToOSG: Creates geometry for OpenSceneGraph from given ProductShapeData. \param[out] parent_group Group to append the geometry. **/ void convertToOSG( std::map<int, shared_ptr<ProductShapeDataOCC> >& map_shape_data, osg::ref_ptr<osg::Switch> parent_group ) { progressTextCallback( L"Converting geometry to OpenGL format ..." 
); progressValueCallback( 0, "scenegraph" ); m_map_entity_id_to_switch.clear(); m_map_representation_to_switch.clear(); shared_ptr<ProductShapeDataOCC> ifc_project_data; std::vector<shared_ptr<ProductShapeDataOCC> > vec_products; for( auto it = map_shape_data.begin(); it != map_shape_data.end(); ++it ) { shared_ptr<ProductShapeDataOCC> shape_data = it->second; if( shape_data ) { vec_products.push_back( shape_data ); } } // create geometry for for each IfcProduct independently, spatial structure will be resolved later std::map<int, osg::ref_ptr<osg::Switch> >* map_entity_id = &m_map_entity_id_to_switch; std::map<int, osg::ref_ptr<osg::Switch> >* map_representations = &m_map_representation_to_switch; const int num_products = (int)vec_products.size(); #ifdef ENABLE_OPENMP Mutex writelock_map; Mutex writelock_ifc_project; #pragma omp parallel firstprivate(num_products) shared(map_entity_id, map_representations) { // time for one product may vary significantly, so schedule not so many #pragma omp for schedule(dynamic,10) #endif for( int i = 0; i < num_products; ++i ) { shared_ptr<ProductShapeDataOCC>& shape_data = vec_products[i]; weak_ptr<IfcObjectDefinition>& ifc_object_def_weak = shape_data->m_ifc_object_definition; if( ifc_object_def_weak.expired() ) { continue; } shared_ptr<IfcObjectDefinition> ifc_object_def( ifc_object_def_weak ); std::stringstream thread_err; if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def) ) { // geometry will be created in method subtractOpenings continue; } else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_ifc_project ); #endif ifc_project_data = shape_data; } shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { continue; } if( !ifc_product->m_Representation ) { continue; } const int product_id = ifc_product->m_entity_id; std::map<int, osg::ref_ptr<osg::Switch> > map_representation_switches; try { convertProductShapeToOSG( shape_data, map_representation_switches ); } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { thread_err << e.what(); } catch( Standard_Failure& sf ) { thread_err << sf.GetMessageString(); } catch( std::exception& e ) { thread_err << e.what(); } catch( ... 
) { thread_err << "undefined error, product id " << product_id; } if( map_representation_switches.size() > 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); std::stringstream strs_product_switch_name; strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group"; product_switch->setName( strs_product_switch_name.str().c_str() ); for( auto it_map = map_representation_switches.begin(); it_map != map_representation_switches.end(); ++it_map ) { osg::ref_ptr<osg::Switch>& repres_switch = it_map->second; product_switch->addChild( repres_switch ); } // apply statesets if there are any const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances = shape_data->getAppearances(); if( vec_product_appearances.size() > 0 ) { applyAppearancesToGroup( vec_product_appearances, product_switch ); } #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_map ); #endif map_entity_id->insert( std::make_pair( product_id, product_switch ) ); map_representations->insert( map_representation_switches.begin(), map_representation_switches.end() ); } if( thread_err.tellp() > 0 ) { messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } // progress callback double progress = (double)i / (double)num_products; if( progress - m_recent_progress > 0.02 ) { #ifdef ENABLE_OPENMP if( omp_get_thread_num() == 0 ) #endif { // leave 10% of progress to openscenegraph internals progressValueCallback( progress*0.9, "scenegraph" ); m_recent_progress = progress; } } } #ifdef ENABLE_OPENMP } // implicit barrier #endif try { // now resolve spatial structure if( ifc_project_data ) { resolveProjectStructure( ifc_project_data, parent_group ); } } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( std::exception& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( ... 
) { messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } } void addNodes( const std::map<int, shared_ptr<BuildingObject> >& map_shape_data, osg::ref_ptr<osg::Switch>& target_group ) { // check if there are entities that are not in spatial structure if( !target_group ) { target_group = new osg::Switch(); } for( auto it_product_shapes = map_shape_data.begin(); it_product_shapes != map_shape_data.end(); ++it_product_shapes ) { int product_id = it_product_shapes->first; auto it_find = m_map_entity_id_to_switch.find( product_id ); if( it_find != m_map_entity_id_to_switch.end() ) { osg::ref_ptr<osg::Switch>& sw = it_find->second; if( sw ) { target_group->addChild( sw ); } } } } bool inParentList( const int entity_id, osg::Group* group ) { if( !group ) { return false; } const osg::Group::ParentList& vec_parents = group->getParents(); for( size_t ii = 0; ii < vec_parents.size(); ++ii ) { osg::Group* parent = vec_parents[ii]; if( parent ) { const std::string parent_name = parent->getName(); if( parent_name.length() > 0 ) { if( parent_name.at( 0 ) == '#' ) { // extract entity id std::string parent_name_id = parent_name.substr( 1 ); size_t last_index = parent_name_id.find_first_not_of( "0123456789" ); std::string id_str = parent_name_id.substr( 0, last_index ); const int id = std::stoi( id_str.c_str() ); if( id == entity_id ) { return true; } bool in_parent_list = inParentList( entity_id, parent ); if( in_parent_list ) { return true; } } } } } return false; } void resolveProjectStructure( const shared_ptr<ProductShapeDataOCC>& product_data, osg::ref_ptr<osg::Switch> group ) { if( !product_data ) { return; } if( product_data->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> object_def( product_data->m_ifc_object_definition ); const int entity_id = object_def->m_entity_id; if( SceneGraphUtils::inParentList( entity_id, group ) ) { messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, object_def.get() ); return; } const std::vector<shared_ptr<ProductShapeDataOCC> >& vec_children = product_data->getChildren(); for( size_t ii = 0; ii < vec_children.size(); ++ii ) { const shared_ptr<ProductShapeDataOCC>& child_product_data = vec_children[ii]; if( !child_product_data ) { continue; } osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch(); resolveProjectStructure( child_product_data, group_subparts ); if( group_subparts->getNumChildren() > 0 ) { if( !child_product_data->m_ifc_object_definition.expired() ) { shared_ptr<IfcObjectDefinition> child_obj_def( child_product_data->m_ifc_object_definition ); std::stringstream group_subparts_name; group_subparts_name << "#" << child_obj_def->m_entity_id << "="; group_subparts_name << child_obj_def->className(); group_subparts->setName( group_subparts_name.str().c_str() ); } group->addChild( group_subparts ); } } auto it_product_map = m_map_entity_id_to_switch.find( entity_id ); if( it_product_map != m_map_entity_id_to_switch.end() ) { const osg::ref_ptr<osg::Switch>& product_switch = it_product_map->second; if( product_switch ) { group->addChild( product_switch ); } } else { if( group->getNumChildren() == 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); group->addChild( product_switch ); std::stringstream switch_name; switch_name << "#" << entity_id << "=" << object_def->className(); product_switch->setName( switch_name.str().c_str() ); } } } void clearAppearanceCache() { #ifdef ENABLE_OPENMP ScopedLock lock( 
m_writelock_appearance_cache ); #endif m_vec_existing_statesets.clear(); } osg::StateSet* convertToOSGStateSet( const shared_ptr<AppearanceData>& appearence ) { if( !appearence ) { return nullptr; } const float shininess = appearence->m_shininess; const float transparency = appearence->m_transparency; const bool set_transparent = appearence->m_set_transparent; const float color_ambient_r = appearence->m_color_ambient.r(); const float color_ambient_g = appearence->m_color_ambient.g(); const float color_ambient_b = appearence->m_color_ambient.b(); const float color_ambient_a = appearence->m_color_ambient.a(); const float color_diffuse_r = appearence->m_color_diffuse.r(); const float color_diffuse_g = appearence->m_color_diffuse.g(); const float color_diffuse_b = appearence->m_color_diffuse.b(); const float color_diffuse_a = appearence->m_color_diffuse.a(); const float color_specular_r = appearence->m_color_specular.r(); const float color_specular_g = appearence->m_color_specular.g(); const float color_specular_b = appearence->m_color_specular.b(); const float color_specular_a = appearence->m_color_specular.a(); if( m_enable_stateset_caching ) { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif for( size_t i = 0; i<m_vec_existing_statesets.size(); ++i ) { const osg::ref_ptr<osg::StateSet> stateset_existing = m_vec_existing_statesets[i]; if( !stateset_existing.valid() ) { continue; } osg::ref_ptr<osg::Material> mat_existing = (osg::Material*)stateset_existing->getAttribute( osg::StateAttribute::MATERIAL ); if( !mat_existing ) { continue; } // compare osg::Vec4f color_ambient_existing = mat_existing->getAmbient( osg::Material::FRONT_AND_BACK ); if( abs( color_ambient_existing.r() - color_ambient_r ) > 0.03 ) break; if( abs( color_ambient_existing.g() - color_ambient_g ) > 0.03 ) break; if( abs( color_ambient_existing.b() - color_ambient_b ) > 0.03 ) break; if( abs( color_ambient_existing.a() - color_ambient_a ) > 0.03 ) break; osg::Vec4f color_diffuse_existing = mat_existing->getDiffuse( osg::Material::FRONT_AND_BACK ); if( abs( color_diffuse_existing.r() - color_diffuse_r ) > 0.03 ) break; if( abs( color_diffuse_existing.g() - color_diffuse_g ) > 0.03 ) break; if( abs( color_diffuse_existing.b() - color_diffuse_b ) > 0.03 ) break; if( abs( color_diffuse_existing.a() - color_diffuse_a ) > 0.03 ) break; osg::Vec4f color_specular_existing = mat_existing->getSpecular( osg::Material::FRONT_AND_BACK ); if( abs( color_specular_existing.r() - color_specular_r ) > 0.03 ) break; if( abs( color_specular_existing.g() - color_specular_g ) > 0.03 ) break; if( abs( color_specular_existing.b() - color_specular_b ) > 0.03 ) break; if( abs( color_specular_existing.a() - color_specular_a ) > 0.03 ) break; float shininess_existing = mat_existing->getShininess( osg::Material::FRONT_AND_BACK ); if( abs( shininess_existing - shininess ) > 0.03 ) break; bool blend_on_existing = stateset_existing->getMode( GL_BLEND ) == osg::StateAttribute::ON; if( blend_on_existing != set_transparent ) break; bool transparent_bin = stateset_existing->getRenderingHint() == osg::StateSet::TRANSPARENT_BIN; if( transparent_bin != set_transparent ) break; // if we get here, appearance is same as existing state set // TODO: block this re-used stateset for merging, or prevent merged statesets from being re-used return stateset_existing; } } osg::Vec4f ambientColor( color_ambient_r, color_ambient_g, color_ambient_b, transparency ); osg::Vec4f diffuseColor( color_diffuse_r, color_diffuse_g, color_diffuse_b, 
transparency ); osg::Vec4f specularColor( color_specular_r, color_specular_g, color_specular_b, transparency ); // TODO: material caching and re-use osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ) { throw OutOfMemoryException(); } mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, diffuseColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, specularColor ); mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); mat->setColorMode( osg::Material::SPECULAR ); osg::StateSet* stateset = new osg::StateSet(); if( !stateset ) { throw OutOfMemoryException(); } stateset->setAttribute( mat, osg::StateAttribute::ON ); if( appearence->m_set_transparent ) { mat->setTransparency( osg::Material::FRONT_AND_BACK, transparency ); stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } if( appearence->m_specular_exponent != 0.f ) { //osg::ref_ptr<osgFX::SpecularHighlights> spec_highlights = new osgFX::SpecularHighlights(); //spec_highlights->setSpecularExponent( spec->m_value ); // todo: add to scenegraph } if( m_enable_stateset_caching ) { m_vec_existing_statesets.push_back( stateset ); } return stateset; } };
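// Hedged usage sketch (not part of the converter class above, assumes only OpenSceneGraph):
// shows how a transparent StateSet is typically assembled, mirroring what convertToOSGStateSet
// does for an AppearanceData with m_set_transparent == true. The function name and the literal
// color/shininess/transparency values are illustrative only.
#include <osg/Material>
#include <osg/StateSet>
#include <osg/Vec4f>
#include <osg/ref_ptr>

osg::ref_ptr<osg::StateSet> makeTransparentStateSet()
{
    const float transparency = 0.5f;  // assumed example value

    osg::ref_ptr<osg::Material> mat = new osg::Material();
    mat->setDiffuse( osg::Material::FRONT_AND_BACK, osg::Vec4f( 0.8f, 0.8f, 0.9f, transparency ) );
    mat->setShininess( osg::Material::FRONT_AND_BACK, 32.0f );
    mat->setTransparency( osg::Material::FRONT_AND_BACK, transparency );

    osg::ref_ptr<osg::StateSet> stateset = new osg::StateSet();
    stateset->setAttribute( mat.get(), osg::StateAttribute::ON );
    // blending plus the transparent bin, as in the converter, so depth sorting works for alpha
    stateset->setMode( GL_BLEND, osg::StateAttribute::ON );
    stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN );
    return stateset;
}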
PMultiOP.h
#ifndef PMultiOP
#define PMultiOP

/*
 * PMultiOP.h:
 * pointwise multiplication
 *
 * Created on: Apr 21, 2017
 *     Author: mszhang
 */

#include "Eigen/Dense"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"

class PMultiNode : public Node {
  public:
    PNode in1, in2;

  public:
    PMultiNode() : Node() {
        in1 = NULL;
        in2 = NULL;
        node_type = "point-multiply";
    }

    virtual inline void clearValue() {
        Node::clearValue();
        in1 = NULL;
        in2 = NULL;
    }

    void forward(Graph *cg, PNode x1, PNode x2) {
        in1 = x1;
        in2 = x2;
        degree = 0;
        x1->addParent(this);
        x2->addParent(this);
        cg->addNode(this);
    }

    inline void compute() {
        val.vec() = in1->val.vec() * in2->val.vec();
    }

    void backward() {
        in1->loss.vec() += loss.vec() * in2->val.vec();
        in2->loss.vec() += loss.vec() * in1->val.vec();
    }

  public:
    // better to rewrite for deep understanding
    inline bool typeEqual(PNode other) {
        return Node::typeEqual(other);
    }

    inline PExecute generate(bool bTrain, dtype cur_drop_factor);
};

class PMultiExecute : public Execute {
  public:
    bool bTrain;

  public:
    inline void forward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            batch[idx]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->backward_drop();
            batch[idx]->backward();
        }
    }
};

inline PExecute PMultiNode::generate(bool bTrain, dtype cur_drop_factor) {
    PMultiExecute* exec = new PMultiExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
    return exec;
}

#endif
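// Hedged standalone check (uses only Eigen, not the Node/Graph framework above): reproduces
// the arithmetic of PMultiNode::compute() and PMultiNode::backward() on plain Eigen arrays,
// i.e. an elementwise product in the forward pass, and grad_in1 += grad_out * val_in2,
// grad_in2 += grad_out * val_in1 in the backward pass. The sample numbers are made up.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::ArrayXf x1(3), x2(3), grad_out(3);
    x1 << 1.0f, 2.0f, 3.0f;
    x2 << 4.0f, 5.0f, 6.0f;
    grad_out << 0.1f, 0.1f, 0.1f;            // loss gradient flowing into the node

    Eigen::ArrayXf val     = x1 * x2;        // compute(): val = in1->val * in2->val
    Eigen::ArrayXf grad_x1 = grad_out * x2;  // backward(): in1->loss += loss * in2->val
    Eigen::ArrayXf grad_x2 = grad_out * x1;  // backward(): in2->loss += loss * in1->val

    std::cout << val.transpose() << "\n"     // expected: 4 10 18
              << grad_x1.transpose() << "\n" // expected: 0.4 0.5 0.6
              << grad_x2.transpose() << "\n";// expected: 0.1 0.2 0.3
    return 0;
}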
solvers.h
#ifndef SOLVERS_H #define SOLVERS_H #include "loss.h" #include "regul.h" #include "list.h" #define USING_SOLVER \ typedef typename loss_type::variable_type D; \ typedef typename loss_type::value_type T; \ typedef typename loss_type::index_type I;\ typedef loss_type LT; \ using Solver<loss_type>::_L; \ using Solver<loss_type>::_loss; \ using Solver<loss_type>::_regul; \ using Solver<loss_type>::_Li; \ using Solver<loss_type>::_verbose; \ using Solver<loss_type>::_max_iter_backtracking; #define USING_INCREMENTAL_SOLVER \ USING_SOLVER; \ using IncrementalSolver<loss_type>::_minibatch; \ using IncrementalSolver<loss_type>::_non_uniform_sampling; \ using IncrementalSolver<loss_type>::_n; \ using IncrementalSolver<loss_type>::_qi; #define USING_SVRG_SOLVER \ USING_INCREMENTAL_SOLVER; \ using SVRG_Solver<loss_type>::_xtilde;\ using SVRG_Solver<loss_type>::_gtilde; #define USING_ACC_SVRG_SOLVER \ using Acc_SVRG_Solver<loss_type,allow_acc>::_y;\ using Acc_SVRG_Solver<loss_type,allow_acc>::_etak;\ using Acc_SVRG_Solver<loss_type,allow_acc>::_gammak;\ using Acc_SVRG_Solver<loss_type,allow_acc>::_mu;\ using Acc_SVRG_Solver<loss_type,allow_acc>::_deltak;\ using Acc_SVRG_Solver<loss_type,allow_acc>::_thetak;\ using Acc_SVRG_Solver<loss_type,allow_acc>::_accelerated_solver;\ USING_SVRG_SOLVER; enum solver_t { ISTA, CATALYST_ISTA, QNING_ISTA, FISTA, SAGA, SVRG, SVRG_UNIFORM, CATALYST_SVRG, ACC_SVRG, MISO, CATALYST_MISO, QNING_SVRG, QNING_MISO, AUTO, INCORRECT_SOLVER }; solver_t solver_from_string(char* regul) { if (strcmp(regul,"ista")==0) return ISTA; if (strcmp(regul,"catalyst-ista")==0) return CATALYST_ISTA; if (strcmp(regul,"qning-ista")==0) return QNING_ISTA; if (strcmp(regul,"fista")==0) return FISTA; if (strcmp(regul,"saga")==0) return SAGA; if (strcmp(regul,"svrg")==0) return SVRG; if (strcmp(regul,"catalyst-svrg")==0) return CATALYST_SVRG; if (strcmp(regul,"qning-svrg")==0) return QNING_SVRG; if (strcmp(regul,"qning-miso")==0) return QNING_MISO; if (strcmp(regul,"acc-svrg")==0) return ACC_SVRG; if (strcmp(regul,"miso")==0) return MISO; if (strcmp(regul,"catalyst-miso")==0) return CATALYST_MISO; if (strcmp(regul,"svrg-uniform")==0) return SVRG_UNIFORM; if (strcmp(regul,"auto")==0) return AUTO; return INCORRECT_SOLVER; } template <typename T> struct ParamSolver { ParamSolver() { nepochs=100; it0=10; tol=T(1e-3); verbose=false; solver=FISTA; max_iter_backtracking=500; minibatch=1; threads=-1; non_uniform_sampling=true; l_memory=20; freq_restart=50; }; int nepochs; T tol; int it0; bool verbose; solver_t solver; int max_iter_backtracking; int minibatch; int threads; bool non_uniform_sampling; int l_memory; int freq_restart; }; template <typename loss_type> class Solver { public: typedef typename loss_type::variable_type D; typedef typename loss_type::value_type T; typedef typename loss_type::index_type I; Solver(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param) : _loss(loss), _regul(regul) { _verbose=param.verbose; _it0=MAX(param.it0,1); _tol=param.tol; _nepochs=param.nepochs; _max_iter_backtracking=param.max_iter_backtracking; _best_dual=-INFINITY; _best_primal=INFINITY; _duality = _loss.provides_fenchel() && regul.provides_fenchel(); _optim_info.resize(6,MAX(param.nepochs/_it0,1)); _L=0; }; virtual ~Solver() { }; virtual void solve(const D& x0, D& x) { _time.start(); x.copy(x0); if (!_duality && _nepochs > 1) _xold.copy(x0); solver_init(x0); if (_verbose) { cout << "*********************************" << endl; print(); _loss.print(); _regul.print(); } for (int it = 1; 
it<=_nepochs; ++it) { if ((it % _it0) == 0) if (test_stopping_criterion(x,it)) break; solver_aux(x); } _time.stop(); if (_verbose) _time.printElapsed(); if (_best_primal != INFINITY) x.copy(_bestx); } void get_optim_info(Matrix<T>& optim) const { int count=0; for (int ii=0; ii<_optim_info.n(); ++ii) if (_optim_info(0,ii) != 0) ++count; if (count > 0) { optim.resize(6,count); } for (int ii=0; ii<count; ++ii) for (int jj=0; jj<6; ++jj) optim(jj,ii)=_optim_info(jj,ii); }; void eval(const D& x) { test_stopping_criterion(x,1); _optim_info(5,0)=0; }; virtual void set_dual_variable(const D& dual0) { }; virtual void save_state() { }; virtual void restore_state() { }; private: inline T get_dual(const D& x) const { if (!_regul.provides_fenchel() || !_loss.provides_fenchel()) { cerr << "Error: no duality gap available" << endl; return -INFINITY; } D grad1, grad2; _loss.get_dual_variable(x,grad1,grad2); const T dual = - _regul.fenchel(grad1,grad2); return dual - _loss.fenchel(grad1); }; inline bool test_stopping_criterion(const D& x, const int it) { const T primal =_loss.eval(x) + _regul.eval(x); _best_primal = MIN(_best_primal,primal); const int ii=MAX(it/_it0-1,0); const double sec = _time.getElapsed(); Vector<T> optim; _optim_info.refCol(ii,optim); if (_best_primal == primal) _bestx.copy(x); if (_verbose) { if (primal==_best_primal) { cout << "Epoch: " << it << ", primal objective: " << primal << ", time: " << sec << endl; } else { cout << "Epoch: " << it << ", primal objective: " << primal << ", best primal: " << _best_primal << ", time: " << sec << endl; } } optim[0]=it; optim[1]=primal; optim[5]=sec; if (_duality) { const T dual=get_dual(x); _best_dual=MAX(_best_dual,dual); const T duality_gap=(_best_primal-_best_dual)/abs<T>(_best_primal); if (_verbose) cout << "Best relative duality gap: " << duality_gap << endl; optim[2]=_best_dual; optim[3]=duality_gap; return duality_gap < _tol; } else { _xold.sub(x); const T diff=sqrt(_xold.nrm2sq()/MAX(EPSILON,x.nrm2sq())); _xold.copy(x); optim[4]=diff; return diff < _tol; } } protected: virtual void solver_init(const D& x0) = 0; virtual void solver_aux(D& x) = 0; virtual void print() const = 0; virtual int minibatch() const { return 1; }; bool _verbose; int _it0; int _nepochs; int _max_iter_backtracking; int _restart_frequency; T _tol; const loss_type& _loss; const Regularizer<D,I>& _regul; Timer _time; T _best_dual; T _best_primal; Matrix<T> _optim_info; bool _duality; D _xold; T _L; D _bestx; Vector<T> _Li; }; template <typename loss_type> class ISTA_Solver: public Solver<loss_type> { public: USING_SOLVER; ISTA_Solver(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param, const Vector<T>* Li=NULL) : Solver<loss_type>(loss,regul,param) { _L=0; if (Li) { _Li.copy(*Li); _L = _Li.maxval()/100; } }; protected: virtual void solver_init(const D& x0) { if (_L==0) { _loss.lipschitz(_Li); _L = _Li.maxval()/100; } }; virtual void solver_aux(D& x) { int iter=1; const T fx = _loss.eval(x); D grad, tmp, tmp2; _loss.grad(x,grad); while (iter < _max_iter_backtracking) { tmp2.copy(x); tmp2.add(grad,-T(1.0)/_L); _regul.prox(tmp2,tmp,T(1.0)/_L); const T fprox = _loss.eval(tmp); tmp2.copy(tmp); tmp2.sub(x); if (fprox <= fx + grad.dot(tmp2) + T(0.5)*_L*tmp2.nrm2sq() + EPSILON) { x.copy(tmp); break; } _L *= T(1.5); if (_verbose) cout << "new value for L: " << _L << endl; ++iter; if (iter == _max_iter_backtracking) cout << "Warning: maximum number of backtracking iterations has been reached" << endl; } }; void print() const { cout << "ISTA Solver" 
<< endl; }; T init_kappa_acceleration(const D& x0) { ISTA_Solver<loss_type>::solver_init(x0); return _L; }; }; template <typename loss_type> class FISTA_Solver final: public ISTA_Solver<loss_type> { public: USING_SOLVER; FISTA_Solver(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param) : ISTA_Solver<loss_type>(loss,regul,param) { }; protected: virtual void solver_init(const D& x0) { ISTA_Solver<loss_type>::solver_init(x0); _t = T(1.0); _y.copy(x0); }; virtual void solver_aux(D& x) { ISTA_Solver<loss_type>::solver_aux(_y); D diff; diff.copy(x); x.copy(_y); diff.sub(x); const T old_t=_t; _t=(1.0+sqrt(1+4*_t*_t))/2; _y.add(diff,(T(1.0)-old_t)/_t); }; virtual void print() const { cout << "FISTA Solver" << endl; }; T _t; D _y; }; template <typename loss_type> class IncrementalSolver: public Solver<loss_type> { public: USING_SOLVER; IncrementalSolver(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param, const Vector<T>* Li=NULL) : Solver<loss_type>(loss,regul,param) { _minibatch=param.minibatch; _non_uniform_sampling=param.non_uniform_sampling; if (Li) _Li.copy(*Li); }; protected: virtual void solver_init(const D& x0) { if (_Li.n() == 0) _loss.lipschitz(_Li); _n=_Li.n(); if (_L==0) { _qi.copy(_Li); _qi.scal(T(1.0)/_qi.sum()); const T Lmean = _Li.mean(); const T Lmax = _Li.maxval(); _non_uniform_sampling = (_non_uniform_sampling && Lmean <= T(0.9)*Lmax); _L = _non_uniform_sampling ? Lmean : Lmax; if (_minibatch > 1) heuristic_L(x0); _oldL=_L; if (_non_uniform_sampling) init_nonu_sampling(); } this->check_mkl(x0); }; void print() const { cout << "Incremental Solver "; if (_non_uniform_sampling) { cout << "with non uniform sampling" << endl; } else { cout << "with uniform sampling" << endl; } cout << "Lipschitz constant: " << _L << endl; }; bool _non_uniform_sampling; int _minibatch; int _n; Vector<T> _qi; Vector<double> _Ui; Vector<int> _Ki; T _oldL; void init_nonu_sampling() { _Ui.resize(_n); for (int ii=0; ii<_n; ++ii) _Ui[ii]=static_cast<double>(_qi[ii]); _Ui.scal(_n/_Ui.asum()); _Ki.resize(_n); _Ki.set(0); List<int> overfull; List<int> underfull; for (int ii=0; ii<_n; ++ii) { if (_Ui[ii] < double(1.0)) { underfull.push_back(ii); } else if (_Ui[ii] > double(1.0)) { overfull.push_back(ii); } } while (underfull.size() > 0 && overfull.size() > 0) { const int indj = underfull.front(); underfull.pop_front(); const int indi = overfull.front(); overfull.pop_front(); _Ki[indj]=indi; _Ui[indi]=_Ui[indi] + _Ui[indj] - double(1.0); if (_Ui[indi] < double(1.0)) { underfull.push_back(indi); } else if (_Ui[indi] > double(1.0)) { overfull.push_back(indi); } } }; int nonu_sampling() { const double x = static_cast<double>(random()-1)/RAND_MAX; const int ind = static_cast<int>(floor(_n*x))+1; const double y = _n*x+1-ind; if (y < _Ui[ind-1]) return ind-1; return _Ki[ind-1]; }; virtual int minibatch() const { return _minibatch; }; T init_kappa_acceleration(const D& x0) { IncrementalSolver<loss_type>::solver_init(x0); const T mu = _regul.strong_convexity(); return ((this->_oldL/(_n) - mu)); }; void check_mkl(const Vector<T>& x0) const { }; void check_mkl(const Matrix<T>& x0) const { if (x0.m() <= 15 || x0.n() <= 15) { set_mkl_sequential(); // TODO should be local } }; private: void heuristic_L(const D& x) { if (_verbose) cout << "Heuristic: Initial L=" << _L; const T Lmax=_L; _L /= _minibatch; int iter=0; D tmp, tmp2, grad; while (iter <= log(_minibatch)/log(2.0) && _L < Lmax) { tmp.copy(x); const T fx=_loss.eval_random_minibatch(tmp,_minibatch); 
_loss.grad_random_minibatch(tmp,grad,_minibatch); // should do non uniform // compute grad and fx tmp.add(grad,-T(1.0)/_L); const T ftmp=_loss.eval_random_minibatch(tmp,_minibatch); tmp2.copy(tmp); tmp2.sub(x); const T s1 =fx + grad.dot(tmp2); const T s2=T(0.5)*tmp2.nrm2sq(); if (ftmp > s1 + _L*s2) _L = MIN( MAX(2.0*_L, (ftmp-s1)/s2),Lmax); ++iter; } if (_verbose) cout << ", Final L=" << _L << endl; } }; template <typename loss_type> class SVRG_Solver: public IncrementalSolver<loss_type> { public: USING_INCREMENTAL_SOLVER; SVRG_Solver(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param, const Vector<T>* Li=NULL) : IncrementalSolver<loss_type>(loss,regul,param,Li) { }; protected: virtual void solver_init(const D& x0) { IncrementalSolver<loss_type>::solver_init(x0); _xtilde.copy(x0); _loss.grad(_xtilde,_gtilde); }; virtual void solver_aux(D& x) { const int nn = _n/_minibatch; const T eta = T(1.0)/(3*_L); D tmp; for (int ii = 0; ii<nn; ++ii) { tmp.copy(x); tmp.add(_gtilde,-eta); for (int jj=0; jj<_minibatch; ++jj) { const int ind = _non_uniform_sampling ? this->nonu_sampling() : random() % _n; const T scal = _non_uniform_sampling ? T(1.0)/(_minibatch*_qi[ind]*_n) : T(1.0)/_minibatch; _loss.double_add_grad(x,_xtilde,ind,tmp,-scal*eta,scal*eta, jj==0 ? T(_minibatch) : 0); } _regul.prox(tmp,x,eta); if (random() % nn == 0) { _xtilde.copy(x); _loss.grad(_xtilde,_gtilde); } } }; void print() const { cout << "SVRG Solver" << endl; IncrementalSolver<loss_type>::print(); }; D _xtilde, _gtilde; }; template <typename loss_type> class MISO_Solver: public IncrementalSolver<loss_type> { public: USING_INCREMENTAL_SOLVER; MISO_Solver(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param, const Vector<T>* Li=NULL) : IncrementalSolver<loss_type>(loss,regul,param,Li) { _minibatch=1; _mu= _regul.id() == L2 ? _regul.strong_convexity() : 0; _kappa = _loss.kappa(); if (_loss.id() == PPA) _mu += _kappa; _isprox=(_regul.id() != L2 || _regul.intercept()) && _regul.id() != NONE; _is_lazy = _isprox && _regul.is_lazy() && _loss.is_sparse(); _extern_zis=false; _count=0; }; virtual void set_dual_variable(const D& dual0) { _extern_zis=true; _zis.copyRef(dual0); }; virtual void solve(const D& y, D& x) { if (_count > 0 && (_count % 10) != 0) { D& ref_barz = _isprox ? 
_barz : x; ref_barz.add(_oldy,-_kappa/_mu); // necessary to have PPA loss here ref_barz.add(y,_kappa/_mu); const bool is_lazy = _isprox && _regul.is_lazy() && _loss.is_sparse(); if (_isprox && !is_lazy) _regul.prox(ref_barz,x,T(1.0)/_mu); } else if (_count==0) { x.copy(y); // just to have the right size } if (_loss.id()==PPA) _loss.get_anchor_point(_oldy); Solver<loss_type>::solve(x,x); }; virtual void save_state() { _count2=_count; _barz2.copy(_barz); _zis2.copy(_zis); _oldy2.copy(_oldy); }; virtual void restore_state() { _count=_count2; _barz.copy(_barz2); _zis.copy(_zis2); _oldy.copy(_oldy2); }; protected: virtual void solver_init(const D& x0) { // initial point will be in fact _z of PPA if (_count==0) { IncrementalSolver<loss_type>::solver_init(x0); _delta=MIN(T(1.0), _n*_mu/(2*_L)); if (_non_uniform_sampling) { const T beta=T(0.5)*_mu*_n; if (this->_Li.maxval() <= beta) { _non_uniform_sampling=false; _delta=T(1.0); } else if (this->_Li.minval() >= beta) { // no change } else { _qi.copy(this->_Li); _qi.thrsmax(beta); _qi.scal(T(1.0)/_qi.sum()); Vector<T> tmp; tmp.copy(_qi); tmp.inv(); tmp.mult(tmp,this->_Li); _L = tmp.maxval()/_n; this->init_nonu_sampling(); _delta=MIN(_n*_qi.minval(), _n*_mu/(2*_L)); } } if (_non_uniform_sampling) _delta=MIN(_delta,_n*_qi.minval()); if (_isprox) _barz.copy(x0); // if PPA, x0 should be the anchor point and _barz = X*_Zis + x0 init_dual_variables(x0); } }; virtual void solver_aux(D& x) { D& ref_barz = _isprox ? _barz : x; if (_count++ % 10 == 0) { if (_loss.id()==PPA) { _loss.get_anchor_point(ref_barz); ref_barz.scal(_kappa/_mu); } else { ref_barz.setZeros(); } if (_count > 1 || _extern_zis) _loss.add_feature(_zis,ref_barz,T(1.0)/(_n*_mu)); if (_isprox && !_is_lazy) _regul.prox(ref_barz,x,T(1.0)/_mu); } Vector<typename loss_type::index_type> indices; for (int ii = 0; ii<_n; ++ii) { const int ind = _non_uniform_sampling ? this->nonu_sampling() : random() % _n; const T scal = _non_uniform_sampling ? T(1.0)/(_qi[ind]*_n) : T(1.0); const T deltas=scal*_delta; if (_is_lazy) { _loss.get_coordinates(ind,indices); _regul.lazy_prox(ref_barz,x,indices,T(1.0)/_mu); } solver_aux_aux(x,ref_barz,ind,deltas); if (_isprox && (!_is_lazy || ii==_n-1)) _regul.prox(ref_barz,x,T(1.0)/_mu); } }; void print() const { cout << "MISO Solver" << endl; IncrementalSolver<loss_type>::print(); }; private: D _zis, _zis2; D _barz, _barz2; D _oldy, _oldy2; T _mu; T _kappa; T _delta; int _count, _count2; bool _perform_update_barz; bool _isprox, _is_lazy,_extern_zis; void inline init_dual_variables(const Vector<T>& x0) { if (_zis.n() != _n) { _zis.resize(_n); _zis.setZeros(); } } void inline init_dual_variables(const Matrix<T>& x0) { const int nclasses=_loss.transpose() ? 
x0.m() : x0.n(); if (_zis.n() != _n || _zis.m() != nclasses) { _zis.resize(nclasses,_n); _zis.setZeros(); } } void inline solver_aux_aux(const Vector<T>& x,Vector<T>& ref_barz,const int ind, const T deltas) { const T oldzi = _zis[ind]; _zis[ind] = (T(1.0)-deltas)*_zis[ind] + deltas*(-_loss.scal_grad(x,ind)); _loss.add_feature(ref_barz,ind,(_zis[ind]-oldzi)/(_n*_mu)); }; void inline solver_aux_aux(const Matrix<T>& x, Matrix<T>& ref_barz,const int ind, const T deltas) { Vector<T> oldzi, newzi; _zis.copyCol(ind,oldzi); _zis.refCol(ind,newzi); _loss.scal_grad(x,ind,newzi); newzi.add_scal(oldzi,T(1.0)-deltas,-deltas); oldzi.sub(newzi); oldzi.scal(-T(1.0)/(_n*_mu)); _loss.add_feature(ref_barz,ind,oldzi); }; }; template <typename SolverType> class Catalyst : public SolverType { public: typedef typename SolverType::LT loss_type; USING_SOLVER Catalyst(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param) : SolverType(loss,regul,param) { _auxiliary_solver=NULL; _loss_ppa=NULL; _accelerated_solver=true; _freq_restart=regul.strong_convexity() > 0 ? param.nepochs+2 : param.freq_restart; }; ~Catalyst() { if(_auxiliary_solver) delete(_auxiliary_solver); if(_loss_ppa) delete(_loss_ppa); }; virtual void set_dual_variable(const D& dual0) { _dual_var.copyRef(dual0); }; protected: virtual void solver_init(const D& x0) { _kappa = this->init_kappa_acceleration(x0); _mu = _regul.strong_convexity(); _count=0; _accelerated_solver=_kappa > 0; //this->_oldL/(_n) >= _mu; if (_accelerated_solver) { ParamSolver<T> param2; param2.nepochs=1; param2.it0=2; param2.verbose=false; param2.minibatch=this->minibatch(); this->_Li.add(_kappa); _loss_ppa = new ProximalPointLoss<loss_type>(_loss,x0,_kappa); _auxiliary_solver = new SolverType(*_loss_ppa,_regul,param2,&this->_Li); if (_dual_var.size() > 0) _auxiliary_solver->set_dual_variable(_dual_var); _y.copy(x0); _alpha=T(1.0); } else { if (_verbose) cout << "Switching to regular solver, problem is well conditioned" << endl; SolverType::solver_init(x0); } }; virtual void solver_aux(D& x) { if (_accelerated_solver) { const T q = _mu/(_mu+_kappa); D xold; xold.copy(x); _auxiliary_solver->solve(_y,x); const T alphaold=_alpha; _alpha = solve_binomial(T(1.0),_alpha*_alpha-q,-_alpha*_alpha); T beta= alphaold*(T(1.0)-alphaold)/(alphaold*alphaold+_alpha); if (++_count % _freq_restart == 0) { beta=0; _alpha=T(1.0); } _y.copy(xold); _y.add_scal(x,T(1.0)+beta,-beta); _loss_ppa->set_anchor_point(_y); } else { SolverType::solver_aux(x); } }; void print() const { cout << "Catalyst Accelerator" << endl; SolverType::print(); }; int _count, _freq_restart; T _kappa, _alpha, _mu; D _y, _dual_var; bool _accelerated_solver; SolverType* _auxiliary_solver; ProximalPointLoss<loss_type>* _loss_ppa; }; template <typename SolverType> class QNing final: public Catalyst<SolverType> { public: typedef typename SolverType::LT loss_type; USING_SOLVER using Catalyst<SolverType>::_kappa; using Catalyst<SolverType>::_accelerated_solver; using Catalyst<SolverType>::_auxiliary_solver; using Catalyst<SolverType>::_loss_ppa; using Catalyst<SolverType>::_y; QNing(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param) : Catalyst<SolverType>(loss,regul,param), _l_memory(param.l_memory) { _skipping_steps=0; _line_search_steps=0; }; virtual void solve(const D& x0, D& x) { Solver<loss_type>::solve(x0,x); if (_verbose) cout << "Total additional line search steps: " << _line_search_steps << endl; if (_verbose) cout << "Total skipping l-bfgs steps: " << _skipping_steps << 
endl; }; protected: virtual void solver_init(const D& x0) { Catalyst<SolverType>::solver_init(x0); if (_accelerated_solver) { _h0=T(1.0)/_kappa; _m=0; if (_verbose) cout << "Memory parameter: " << _l_memory << endl; _ys.resize(x0.size(),_l_memory); _ss.resize(x0.size(),_l_memory); _rhos.resize(_l_memory); _etak=T(1.0); _skipping_steps=0; _line_search_steps=0; } }; virtual void solver_aux(D& x) { if (_accelerated_solver) { if (_gk.size() == 0) get_gradient(x); // update variable _y and test D oldyk; oldyk.copy(_y); D oldxk; oldxk.copy(x); T oldFk=_Fk; D oldgk; oldgk.copy(_gk); D g; get_lbfgs_direction(g); const int max_iter=5; _auxiliary_solver->save_state(); for (int ii=1; ii<=max_iter; ++ii) { _y.copy(oldyk); _y.add(g,-_etak); _y.add(oldgk,-(T(1.0)-_etak)/_kappa); get_gradient(x); // _gk = kappa(x-y) if (_etak == 0 || _Fk <= oldFk - (T(0.25)/_kappa)*oldgk.nrm2sq()) break; if (_Fk > 1.05*oldFk) { _auxiliary_solver->restore_state(); x.copy(oldxk); } _etak /= 2; _line_search_steps++; if (ii==max_iter-1 || _etak < T(0.1)) { _etak=0; } } if (_Fk > 1.05*oldFk) { FLAG(5) _auxiliary_solver->restore_state(); x.copy(oldxk); reset_lbfgs(); } else { oldyk.add_scal(_y,T(1.0),-T(1.0)); oldgk.add_scal(_gk,T(1.0),-T(1.0)); update_lbfgs_matrix(oldyk,oldgk); } _etak=MAX(MIN(T(1.0),_etak*T(1.2)),T(0.1)); } else { SolverType::solver_aux(x); } }; void print() const { cout << "QNing Accelerator" << endl; SolverType::print(); }; private: inline void get_lbfgs_direction(Vector<T>& g) const { g.copy(_gk); get_lbfgs_direction_aux(g); }; inline void get_lbfgs_direction(Matrix<T>& g) const { g.copy(_gk); Vector<T> gg; g.toVect(gg); get_lbfgs_direction_aux(gg); }; inline void get_lbfgs_direction_aux(Vector<T>& g) const { // two-loop recursion algorithm Vector<T> alphas(_l_memory); Vector<T> cols, coly; T gamma=T(1.0)/_kappa; for (int ii = _m-1; ii>= MAX(_m-_l_memory,0); --ii) { const int ind = ii % _l_memory; _ss.refCol(ind,cols); _ys.refCol(ind,coly); if (ii==_m-1) gamma=cols.dot(coly)/coly.nrm2sq(); alphas[ind]=_rhos[ind]*cols.dot(g); g.add(coly,-alphas[ind]); } g.scal(gamma); for (int ii = MAX(_m-_l_memory,0); ii<= _m-1; ++ii) { const int ind = ii % _l_memory; _ss.refCol(ind,cols); _ys.refCol(ind,coly); const T beta = _rhos[ind]*coly.dot(g); g.add(cols,alphas[ind]-beta); } }; inline void update_lbfgs_matrix(const Matrix<T>& sk, const Matrix<T>& yk) { Vector<T> skk, ykk; sk.toVect(skk); yk.toVect(ykk); update_lbfgs_matrix(skk,ykk); }; inline void update_lbfgs_matrix(const Vector<T>& sk, const Vector<T>& yk) { const T theta=sk.dot(yk); if (theta > T(1e-12)) { Vector<T> coly, cols; const int ind=_m % _l_memory; _ys.refCol(ind,coly); coly.copy(yk); _ss.refCol(ind,cols); cols.copy(sk); _rhos[ind]=T(1.0)/theta; _m++; } else { _skipping_steps++; //if (_skipping_steps % 10 == 0) // reset_lbfgs(); } }; void reset_lbfgs() { _m=0; }; void get_gradient(D& x) { _loss_ppa->set_anchor_point(_y); _auxiliary_solver->solve(_y,x); _gk.copy(_y); _gk.add_scal(x,-_kappa,_kappa); _Fk=_loss_ppa->eval(x)+_regul.eval(x); }; T _h0; int _l_memory; INTM _m; Matrix<T> _ys, _ss; Vector<T> _rhos; D _gk, _xk; T _Fk; T _etak; int _skipping_steps, _line_search_steps; }; template <typename loss_type, bool allow_acc = true> class Acc_SVRG_Solver: public SVRG_Solver<loss_type> { public: USING_SVRG_SOLVER; Acc_SVRG_Solver(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param, const Vector<T>* Li=NULL) : SVRG_Solver<loss_type>(loss,regul,param,Li) { _accelerated_solver=allow_acc; }; virtual void solver_init(const D& x0) { 
IncrementalSolver<loss_type>::solver_init(x0); _mu = _regul.strong_convexity(); _nn=_n/_minibatch; _accelerated_solver=allow_acc && (T(20)*this->_oldL/_nn > _mu); if (_accelerated_solver) { _gammak = MAX(_L/(_nn),_mu); update_acceleration_parameters(); _xtilde.copy(x0); _y.copy(x0); _loss.grad(_xtilde,_gtilde); } else { if (_verbose && allow_acc) cout << "Problem is well conditioned, switching to regular solver" << endl; SVRG_Solver<loss_type>::solver_init(x0); } }; virtual void solver_aux(D& x) { if (_accelerated_solver) { for (int ii=0; ii<_nn; ++ii) { x.copy(_y); x.add(_gtilde,-_etak); for (int jj=0; jj<_minibatch; ++jj) { const int ind = _non_uniform_sampling ? this->nonu_sampling() : random() % _n; const T scal = _non_uniform_sampling ? T(1.0)/(_qi[ind]*_n*_minibatch) : T(1.0)/_minibatch; _loss.double_add_grad(_y,_xtilde,ind,x,-scal*_etak,scal*_etak); } _regul.prox(x,x,_etak); const T alphak=_mu*_deltak/_gammak; const T betak=_deltak/(_gammak*_etak); const T a=(T(1.0)-alphak)/_thetak + alphak; update_acceleration_parameters(); if (random() % _nn == 0) { _y.add_scal(_xtilde,(T(1.0)-a)*_thetak,_thetak*(a-betak)); _y.add(x,betak*_thetak + T(1.0)-_thetak); _xtilde.copy(x); _loss.grad(_xtilde,_gtilde); } else { _y.add_scal(_xtilde,T(1.0)-_thetak*a,_thetak*(a-betak)); _y.add(x,betak*_thetak); } }; } else { SVRG_Solver<loss_type>::solver_aux(x); } }; protected: void print() const { cout << "Accelerated SVRG Solver" << endl; if (!_accelerated_solver) cout << "Problem is well conditioned, switching to regular solver" << endl; IncrementalSolver<loss_type>::print(); }; bool _accelerated_solver; T _gammak, _mu, _deltak, _etak, _thetak; D _y; int _nn; void update_acceleration_parameters() { _deltak = MIN( solve_binomial(T(9.0)*_nn*_L/T(5.0), _gammak-_mu, -_gammak), T(1.0)/(3*_nn)); _gammak = (T(1.0)-_deltak)*_gammak+_mu*_deltak; _etak = MIN(T(1.0)/(3*_L), T(1.0)/(15*_gammak*_nn)); _thetak=(3*_nn*_deltak-5*_mu*_etak)/(3-5*_mu*_etak); }; }; template <typename loss_type, bool allow_acc=true> class SVRG_Solver_FastRidge: public Acc_SVRG_Solver< loss_type, allow_acc > { public: USING_ACC_SVRG_SOLVER; SVRG_Solver_FastRidge(const loss_type& loss, const Regularizer<D,I>& regul, const ParamSolver<T>& param, const Vector<T>* Li = NULL) : Acc_SVRG_Solver<loss_type,allow_acc>(loss,regul,param,Li), _is_lazy(loss_type::is_sparse()) { if (param.minibatch > 1) cerr << "Minibatch is not compatible with lazy updates" << endl; _minibatch=1; }; virtual void solver_init(const D& x0) { Acc_SVRG_Solver<loss_type,allow_acc>::solver_init(x0); if (_loss.id() == PPA) { const T kappa = _loss.kappa(); _gtilde.add(_xtilde,-kappa);// now gtilde has the right value } }; /// define auxiliary solver ? virtual void solver_aux(D& x) { if (_accelerated_solver) { const T lambda=_regul.lambda(); DoubleLazyVector<T,I>* lazyy = NULL; Vector<I> indices; if (_is_lazy) { lazyy=new DoubleLazyVector<T,I>(_y,_xtilde,_gtilde,_n); } for (int ii=0; ii<_n; ++ii) { const T alphak=_mu*_deltak/_gammak; const T betak=_deltak/(_gammak*_etak); const T a=(T(1.0)-alphak)/_thetak + alphak; const T scalprox = T(1.0)/(T(1.0)+lambda*_etak); const T eta=_etak; const int ind = _non_uniform_sampling ? this->nonu_sampling() : random() % _n; const T scaleta = _non_uniform_sampling ? eta/(_qi[ind]*_n) : eta; this->update_acceleration_parameters(); const bool update_xtilde = random() % _n == 0; const T coeffy=_thetak*(a-betak); const T coeffx= update_xtilde ? betak*_thetak + T(1.0)-_thetak :betak*_thetak; const T coeffxtilde = update_xtilde ? 
(T(1.0)-a)*_thetak : T(1.0)-_thetak*a; if (update_xtilde || ii==_n-1) { if (_is_lazy) lazyy->update(); x.copy(_y); _loss.double_add_grad(_y,_xtilde,ind,x,-scaleta,scaleta); x.add_scal(_gtilde,-scalprox*eta,scalprox); _y.add_scal(_xtilde,coeffxtilde,coeffy); _y.add(x,coeffx); } else { const T coeff_add_grad=scaleta * scalprox*coeffx/(coeffy+scalprox*coeffx); if (_is_lazy) { _loss.get_coordinates(ind, indices); lazyy->update(indices); } _loss.double_add_grad(_y,_xtilde,ind,_y,-coeff_add_grad,coeff_add_grad); if (_is_lazy) { lazyy->add_scal(coeffxtilde,-scalprox*eta*coeffx,coeffy+scalprox*coeffx); } else { _y.add_scal(_gtilde,-scalprox*eta*coeffx,coeffy+scalprox*coeffx); _y.add(_xtilde,coeffxtilde); } } if (update_xtilde) { _xtilde.copy(x); _loss.grad(_xtilde,_gtilde); } }; if (_is_lazy) delete(lazyy); } else if (_loss.id() == PPA) { /// we will optimize implicitly f(xtilde) - kappa <x , z> + (mu+kappa)/2|x|^2 /// meaning, we want gtilde to be Df(xtilde) - kappa z LazyVector<T,I>* lazyx = NULL; Vector<I> indices; if (_is_lazy) { //indices.resize(x.n()); lazyx=new LazyVector<T,I>(x,_gtilde,_n); } const T eta = T(1.0)/(3*(_L-_loss.kappa())); const T lambda=_regul.lambda()+_loss.kappa(); // take care of 0.5(mu+kappa)|x|^2, for (int ii = 0; ii<_n; ++ii) { const int ind = _non_uniform_sampling ? this->nonu_sampling() : random() % _n; const T scal = _non_uniform_sampling ? T(1.0)/(_qi[ind]*_n) : T(1.0); if (_is_lazy) { _loss.get_coordinates(ind, indices); lazyx->update(indices); _loss.double_add_grad(x,_xtilde,ind,x,-scal*eta,scal*eta,0); lazyx->add_scal(-eta,T(1.0)/(T(1.0)+eta*lambda)); } else { _loss.double_add_grad(x,_xtilde,ind,x,-scal*eta,scal*eta,0); // x <- x - s( D_i f(x) - D_i f(xtilde)) x.add_scal(_gtilde,-eta/(T(1.0)+eta*lambda),T(1.0)/(T(1.0)+eta*lambda)); } if (random() % _n == 0) { if (_is_lazy) lazyx->update(); _xtilde.copy(x); _loss.grad(_xtilde,_gtilde);// gtilde will be equal to Df(xtilde) + kappa (xtilde- z) _gtilde.add(_xtilde,-_loss.kappa());// now gtilde has the right value } } if (_is_lazy) { lazyx->update(); delete(lazyx); } } else { LazyVector<T,I>* lazyx = NULL; Vector<I> indices; if (_is_lazy) { //indices.resize(x.n()); lazyx=new LazyVector<T,I>(x,_gtilde,_n); } const T eta = T(1.0)/(3*_L); const T lambda=_regul.lambda(); // replace by lazyprox ? for (int ii = 0; ii<_n; ++ii) { const int ind = _non_uniform_sampling ? this->nonu_sampling() : random() % _n; const T scal = _non_uniform_sampling ? T(1.0)/(_qi[ind]*_n) : T(1.0); if (_is_lazy) { _loss.get_coordinates(ind, indices); lazyx->update(indices); _loss.double_add_grad(x,_xtilde,ind,x,-scal*eta,scal*eta); lazyx->add_scal(-eta,T(1.0)/(T(1.0)+eta*lambda)); // replace by lazyprox ? 
} else { _loss.double_add_grad(x,_xtilde,ind,x,-scal*eta,scal*eta); x.add_scal(_gtilde,-eta/(T(1.0)+eta*lambda),T(1.0)/(T(1.0)+eta*lambda)); } if (random() % _n == 0) { if (_is_lazy) lazyx->update(); _xtilde.copy(x); _loss.grad(_xtilde,_gtilde); } } if (_is_lazy) { lazyx->update(); delete(lazyx); } } }; protected: void print() const { if (_accelerated_solver) { cout << "Accelerated SVRG Solver, "; } else { cout << "SVRG Solver, "; } if (_is_lazy) { cout << "specialized for sparse matrices and L2 regularization" << endl; } else { cout << "specialized for L2 regularization" << endl; } IncrementalSolver<loss_type>::print(); }; private: const bool _is_lazy; }; template <typename loss_type> Solver<loss_type>* get_solver(const loss_type& loss, const Regularizer<typename loss_type::variable_type, typename loss_type::index_type>& regul, const ParamSolver<typename loss_type::value_type>& param) { typedef typename loss_type::value_type T; Solver<loss_type>* solver; solver_t solver_type=param.solver; if (solver_type==AUTO) { const T L=loss.lipschitz(); const int n = loss.n(); const T lambda=regul.strong_convexity(); if (n < 1000) { solver_type=QNING_ISTA; } else if (lambda < L/(100*n)) { solver_type=QNING_MISO; } else { solver_type=CATALYST_MISO; } } switch (solver_type) { case ISTA: solver= new ISTA_Solver<loss_type>(loss,regul,param); break; case QNING_ISTA: solver = new QNing< ISTA_Solver<loss_type> >(loss,regul,param); break; case CATALYST_ISTA: solver = new Catalyst< ISTA_Solver<loss_type> >(loss,regul,param); break; case FISTA: solver= new FISTA_Solver<loss_type>(loss,regul,param); break; case SVRG: solver= new SVRG_Solver<loss_type>(loss,regul,param); break; case MISO: solver= regul.strong_convexity() > 0 ? new MISO_Solver<loss_type>(loss,regul,param) : new Catalyst< MISO_Solver<loss_type> >(loss,regul,param); break; case SVRG_UNIFORM: { ParamSolver<typename loss_type::value_type> param2=param; param2.non_uniform_sampling=false; solver= new SVRG_Solver<loss_type>(loss,regul,param2); break; } case CATALYST_SVRG: solver = new Catalyst< SVRG_Solver<loss_type> >(loss,regul,param); break; case QNING_SVRG: solver = new QNing< SVRG_Solver<loss_type> >(loss,regul,param); break; case CATALYST_MISO: solver = new Catalyst< MISO_Solver<loss_type> >(loss,regul,param); break; case QNING_MISO: solver = new QNing< MISO_Solver<loss_type> >(loss,regul,param); break; case ACC_SVRG: solver = new Acc_SVRG_Solver<loss_type>(loss,regul,param); break; default: cerr << "Not implemented, performs nothing"; solver=NULL; } return solver; }; template <typename M> void simple_erm(const M& X, const Vector<typename M::value_type>& y, const Vector<typename M::value_type>& w0, Vector<typename M::value_type>& w, Vector<typename M::value_type>& dual_variable, Matrix<typename M::value_type>& optim_info, const ParamSolver<typename M::value_type>& param, const ParamModel<typename M::value_type>& model) { init_omp(param.threads); typedef typename M::value_type T; typedef typename M::index_type I; typedef Vector<T> D; typedef LinearLossVec<M> loss_type; if (model.intercept) { if (X.m()+1 != w0.n()) { cerr << "Dimension of initial point is not consistent. With intercept, if X is m x n, w0 should be (n+1)-dimensional." << endl; return; } } else { if (X.m() != w0.n()) { cerr << "Dimension of initial point is not consistent. If X is m x n, w0 should be n-dimensional." 
<< endl; return; } } DataLinear<M> data(X,model.intercept); if (param.verbose) data.print(); LinearLossVec<M>* loss; switch (model.loss) { case SQUARE: loss = new SquareLoss<M>(data,y); break; case LOGISTIC: loss = new LogisticLoss<M>(data,y); break; case SQHINGE: loss = new SquaredHingeLoss<M>(data,y); break; //case HINGE: loss = new HingeLoss<M>(data,y); break; case SAFE_LOGISTIC: loss = new SafeLogisticLoss<M>(data,y); break; default: cerr << "Not implemented, square loss is chosen by default"; loss = new SquareLoss<M>(data,y); } Regularizer<D,I>* regul; switch(model.regul) { case L2: regul = new Ridge<D,I>(model); break; case L1: regul = new Lasso<D,I>(model); break; case L1BALL: regul = new L1Ball<D,I>(model); break; case L2BALL: regul = new L2Ball<D,I>(model); break; case FUSEDLASSO: regul = new FusedLasso<D,I>(model); break; case ELASTICNET: regul = new ElasticNet<D,I>(model); break; case NONE: regul = new None<D,I>(model); break; default: cerr << "Not implemented, no regularization is chosen"; regul = new None<D,I>(model); } Solver<loss_type>* solver; if (param.nepochs==0) { ParamSolver<typename D::value_type> param2=param; param2.verbose=false; solver= new ISTA_Solver<loss_type>(*loss,*regul,param2); solver->eval(w0); w.copy(w0); } else { if (param.solver==SVRG && model.regul==L2 && !model.intercept) { solver= new SVRG_Solver_FastRidge<loss_type,false>(*loss,*regul,param); } else if (param.solver==ACC_SVRG && model.regul==L2 && !model.intercept) { solver= new SVRG_Solver_FastRidge<loss_type,true>(*loss,*regul,param); } else if (param.solver==CATALYST_SVRG && model.regul==L2 && !model.intercept) { solver = new Catalyst< SVRG_Solver_FastRidge<loss_type,false> >(*loss,*regul,param); } else if (param.solver==QNING_SVRG && model.regul==L2 && !model.intercept) { solver = new QNing< SVRG_Solver_FastRidge<loss_type,false> >(*loss,*regul,param); } else { solver=get_solver<loss_type>(*loss,*regul,param); if (!solver) { w.copy(w0); delete(loss); delete(regul); return; } } D new_w0; if (model.intercept) { data.set_intercept(w0,new_w0); } else { new_w0.copyRef(w0); } if (dual_variable.n() != 0) solver->set_dual_variable(dual_variable); solver->solve(new_w0,w); if (model.intercept) { data.reverse_intercept(w); } } if (model.regul==L1) for (int ii=0; ii<w.n(); ++ii) if (abs<T>(w[ii]) < EPSILON) w[ii]=0; solver->get_optim_info(optim_info); delete(solver); delete(loss); delete(regul); }; template <typename T, typename I> Regularizer<Matrix<T>,I>* get_regul_mat(const ParamModel<T>& model, const int nclass, const bool transpose) { typedef Matrix<T> D; typedef Vector<T> V; Regularizer<D,I>* regul; switch(model.regul) { case L2: regul = transpose ? static_cast<Regularizer<D,I>*>(new RegVecToMat<Ridge<V,I> >(model)) : new RegMat<Ridge<V,I> >(model,nclass,transpose); break; case L1: regul = transpose ? static_cast<Regularizer<D,I>*>(new RegVecToMat<Lasso<V,I> >(model)) : new RegMat<Lasso<V,I> >(model,nclass,transpose); break; case ELASTICNET: regul = transpose ? 
static_cast<Regularizer<D,I>*>(new RegVecToMat<ElasticNet <V,I> >(model)) : new RegMat<ElasticNet<V,I> >(model,nclass,transpose); break; case L1BALL: regul = new RegMat<L1Ball<V,I> >(model,nclass,transpose); break; case L2BALL: regul = new RegMat<L2Ball<V,I> >(model,nclass,transpose); break; case L1L2: regul = new MixedL1L2<T,I>(model,nclass,transpose); break; case L1L2_L1: regul = new MixedL1L2_L1<T,I>(model,nclass,transpose); break; case L1LINF: regul = new MixedL1Linf<T,I>(model,nclass,transpose); break; case FUSEDLASSO: regul = new RegMat<FusedLasso<V,I> >(model,nclass,transpose); break; case NONE: regul = new None<D,I>(model); break; default: cerr << "Not implemented, no regularization is chosen"; regul = new None<D,I>(model); } return regul; }; template <typename loss_type> void solve_mat(loss_type& loss, const Regularizer<typename loss_type::variable_type, typename loss_type::index_type>& regul, const ParamSolver<typename loss_type::value_type>& param, const typename loss_type::variable_type& W0, typename loss_type::variable_type& W, Matrix<typename loss_type::value_type>& dual_variable, Matrix<typename loss_type::value_type>& optim_info) { typedef typename loss_type::value_type T; typedef typename loss_type::variable_type D; Solver<loss_type>* solver; if (param.nepochs==0) { ParamSolver<T> param2=param; param2.verbose=false; solver= new ISTA_Solver<loss_type>(loss,regul,param2); if (loss.transpose()) { Matrix<T> W0T, WT; W0.transpose(W0T); solver->eval(W0T); } else { solver->eval(W0); } W.copy(W0); } else { solver=get_solver<loss_type>(loss,regul,param); if (!solver) { W.copy(W0); return; } D new_W0; if (loss.intercept()) { loss.set_intercept(W0,new_W0); } else { new_W0.copyRef(W0); } if (dual_variable.n() != 0) solver->set_dual_variable(dual_variable); if (loss.transpose()) { Matrix<T> W0T, WT; new_W0.transpose(W0T); solver->solve(W0T,WT); WT.transpose(W); } else { solver->solve(new_W0,W); } if (loss.intercept()) { loss.reverse_intercept(W); } } if (regul.id()==L1) for (INTM ii=0; ii<W.n(); ++ii) for (INTM jj=0; jj<W.m(); ++jj) if (abs<T>(W(jj,ii)) < EPSILON) W(jj,ii)=0; solver->get_optim_info(optim_info); delete(solver); }; // X is p x n // y is nclasses x n // W0 is p x nclasses if no intercept (or p+1 x nclasses with intercept) // prediction model is W0^T X gives nclasses x n template <typename M> void multivariate_erm(const M& X, const Matrix<typename M::value_type>& y, const Matrix<typename M::value_type>& W0, Matrix<typename M::value_type>& W, Matrix<typename M::value_type>& dual_variable, Matrix<typename M::value_type>& optim_info, const ParamSolver<typename M::value_type>& param, const ParamModel<typename M::value_type>& model) { typedef typename M::value_type T; typedef typename M::index_type I; if ((model.intercept && X.m()+1 != W0.m()) || (!model.intercept && X.m() != W0.m())) { cerr << "Dimension of initial point is not consistent." 
<< endl; return; } init_omp(param.threads); typedef Matrix<T> D; if (is_loss_for_matrices(model.loss) || is_regul_for_matrices(model.regul)) { DataMatrixLinear<M> data(X,model.intercept); if (param.verbose) data.print(); LinearLossMat< M, Matrix<T> >* loss; switch (model.loss) { case SQUARE: loss = new SquareLossMat<M>(data,y); break; case LOGISTIC: loss = new LossMat< LogisticLoss<M> >(data,y); break; case SQHINGE: loss = new LossMat< SquaredHingeLoss<M> >(data,y); break; //case HINGE: loss = new LossMat< HingeLoss<M> >(data,y); break; case SAFE_LOGISTIC: loss = new LossMat<SafeLogisticLoss<M> >(data,y); break; default: cerr << "Not implemented, square loss is chosen by default"; loss = new SquareLossMat<M>(data,y); } const int nclass=W0.n(); Regularizer<D,I>* regul = get_regul_mat<T,I>(model,nclass,loss->transpose()); solve_mat<LinearLossMat< M, Matrix<T> > >(*loss,*regul,param,W0,W,dual_variable,optim_info); delete(regul); delete(loss); } else { W.copy(W0); const int nclass=W0.n(); const int it0=MAX(param.it0,1); optim_info.resize(6,MAX(param.nepochs/it0,1)); optim_info.setZeros(); ParamSolver<T> param2=param; param2.verbose=false; if (param.verbose) { DataMatrixLinear<M> data(X,model.intercept); data.print(); } Timer global_all; global_all.start(); #pragma omp parallel for for (int ii=0; ii<nclass; ++ii) { Vector<T> w0col, wcol, ycol, dualcol; Matrix<T> optim_info_col; W0.refCol(ii,w0col); W.refCol(ii,wcol); y.copyRow(ii,ycol); if (dual_variable.m() == nclass) dual_variable.copyRow(ii,dualcol); simple_erm(X,ycol,w0col,wcol,dualcol,optim_info_col,param2,model); if (dual_variable.m() == nclass) dual_variable.copyToRow(ii,dualcol); #pragma omp critical { optim_info.add(optim_info_col); if (param.verbose) { const int noptim=optim_info_col.n()-1; cout << "Solver " << ii << " has terminated after " << optim_info_col(0,noptim) << " epochs in " << optim_info_col(5,noptim) << " seconds" << endl; if (optim_info_col(4,noptim)==0) { cout << " Primal objective: " << optim_info_col(1,noptim) << ", relative duality gap: " << optim_info_col(3,noptim) << endl; } else { cout << " Primal objective: " << optim_info_col(1,noptim) << ", tol: " << optim_info_col(4,noptim) << endl; } } } } global_all.stop(); if (param.verbose) { cout << "Time for the one-vs-all strategy" << endl; global_all.printElapsed(); } } }; template <typename M> void multivariate_erm(const M& X, const Vector<int>& y, const Matrix<typename M::value_type>& W0, Matrix<typename M::value_type>& W, Matrix<typename M::value_type>& dual_variable, Matrix<typename M::value_type>& optim_info, const ParamSolver<typename M::value_type>& param, const ParamModel<typename M::value_type>& model) { typedef typename M::value_type T; typedef typename M::index_type I; if ((model.intercept && X.m()+1 != W0.m()) || (!model.intercept && X.m() != W0.m())) { cerr << "Dimension of initial point is not consistent." 
<< endl; return; } const int nclass=y.maxval()+1; if ((is_regression_loss(model.loss) || !is_loss_for_matrices(model.loss))) { const int n = y.n(); Matrix<typename M::value_type> labels(nclass,n); labels.set(-(1.0)); for (int ii=0; ii<n; ++ii) labels(y[ii],ii)=(1.0); return multivariate_erm(X,labels,W0,W,dual_variable,optim_info,param,model); } init_omp(param.threads); typedef Matrix<T> D; DataMatrixLinear<M> data(X,model.intercept); if (param.verbose) data.print(); LinearLossMat<M, Vector<int> >* loss; switch (model.loss) { case MULTI_LOGISTIC: loss= new MultiClassLogisticLoss<M>(data,y); break; default: cerr << "Not implemented, multilog loss is chosen by default"; loss= new MultiClassLogisticLoss<M>(data,y); } Regularizer<D,I>* regul = get_regul_mat<T,I>(model,nclass,loss->transpose()); solve_mat<LinearLossMat<M, Vector<int> > >(*loss,*regul,param,W0,W,dual_variable,optim_info); delete(regul); delete(loss); }; #endif
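// Hedged toy illustration (self-contained, independent of the solver templates above): a
// one-dimensional proximal-gradient (ISTA) loop with the same backtracking test used in
// ISTA_Solver::solver_aux, applied to 0.5*(x-3)^2 + lambda*|x|. The problem, the deliberately
// small initial L and the iteration counts are made up; with lambda = 1 the minimizer is x = 2.
#include <cstdio>

int main()
{
    const double lambda = 1.0;
    auto f    = [](double x) { return 0.5 * (x - 3.0) * (x - 3.0); };
    auto grad = [](double x) { return x - 3.0; };
    auto prox = [&](double v, double step) {       // soft-thresholding: prox of step*lambda*|.|
        const double t = step * lambda;
        return v > t ? v - t : (v < -t ? v + t : 0.0);
    };

    double x = 0.0, L = 0.1;                        // small L so the backtracking actually triggers
    for (int it = 0; it < 50; ++it) {
        for (int bt = 0; bt < 100; ++bt) {
            const double cand = prox(x - grad(x) / L, 1.0 / L);
            const double d    = cand - x;
            // sufficient-decrease test, as in solver_aux: f(prox) <= f(x) + <grad,d> + L/2 |d|^2
            if (f(cand) <= f(x) + grad(x) * d + 0.5 * L * d * d + 1e-12) { x = cand; break; }
            L *= 1.5;                               // same Lipschitz-constant update as the solver
        }
    }
    std::printf("x = %g (expected 2), final L = %g\n", x, L);
    return 0;
}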
EPI_fmt_plug.c
/* * EPiServer module for john 1.7.2 (and possibly later) * Uses hashes/salts found in the tblSID of an EPiServer database installation * * Created by Johannes Gumbel (johannes [at] iforge.cc) * * If you have any questions as to how a function incorporates with john, please refer to formats.h of john * * version 0.1 released on 10 jan 2007 * * See doc/README.format-epi for information on the input file format. * * Updated Dec, 2014, JimF. Added OMP, and allowed more than one hash to be * processed at once (OMP_SCALE stuff). */ #if FMT_EXTERNS_H extern struct fmt_main fmt_EPI; #elif FMT_REGISTERS_H john_register_one(&fmt_EPI); #else #include <string.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "sha.h" #ifdef _OPENMP #include <omp.h> #ifdef __MIC__ #ifndef OMP_SCALE #define OMP_SCALE 8192 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 32768 // Tuned, K8-dual HT #endif #endif // __MIC__ #endif #include "memdbg.h" #define CIPHERTEXT_LENGTH 105 #define PLAINTEXT_LENGTH 125 #define BINARY_LENGTH 20 #define BINARY_ALIGN sizeof(uint32_t) #define SALT_LENGTH 30 #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static int (*key_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_LENGTH / 4]; static char global_salt[SALT_LENGTH+1]; static struct fmt_tests global_tests[] = { {"0x5F1D84A6DE97E2BEFB637A3CB5318AFEF0750B856CF1836BD1D4470175BE 0x4D5EFDFA143EDF74193076F174AC47CEBF2F417F", "Abc.!23"}, // new tests from pass_gen.pl {"0x4F5233704337716F63526A7066344B52784F7A6363316750516A72335668 0x7346DA02479E55973E052FC9A173A3FEA4644FF8","test1"}, {"0x76706335715834565A55784662304F3367756350684F634447777A313642 0xDBD3D2764A376673164962E3EE2AE95AB6ED2759","thatsworking"}, {"0x6F724166466172354A7431316A4842746878434B6632744945574A37524A 0xE1ADE625160BB27C16184795715F1C9EF30C45B0","test3"}, {NULL} }; static void init(struct fmt_main *self) { #if defined (_OPENMP) int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif key_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*key_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(key_len); } /* * Expects ciphertext of format: 0xHEX*60 0xHEX*40 */ static int valid(char *ciphertext, struct fmt_main *self) { unsigned int len, n; if (!ciphertext) return 0; len = strnlen(ciphertext, CIPHERTEXT_LENGTH + 1); if (len != CIPHERTEXT_LENGTH) return 0; // check fixed positions if (ciphertext[0] != '0' || ciphertext[1] != 'x' || ciphertext[62] != ' ' || ciphertext[63] != '0' || ciphertext[64] != 'x') return 0; for (n = 2; n < 62 && atoi16u[ARCH_INDEX(ciphertext[n])] != 0x7F; ++n); if (n < 62) return 0; for (n = 65; n < CIPHERTEXT_LENGTH && atoi16u[ARCH_INDEX(ciphertext[n])] != 0x7F; ++n); return n == len; } static void _tobin(char* dst, char *src, unsigned int len) { unsigned int n; if (src[0] == '0' && src[1] == 'x') src += sizeof(char)*2; for (n = 0; n < len; ++n) dst[n] = atoi16[ARCH_INDEX(src[n*2])]<<4 | atoi16[ARCH_INDEX(src[n*2+1])]; } static void* get_binary(char *ciphertext) { static ARCH_WORD bin[(BINARY_LENGTH + sizeof(ARCH_WORD) - 1) / sizeof(ARCH_WORD)]; _tobin((char*)bin, (char*)(ciphertext+65), BINARY_LENGTH); return bin; } static void* get_salt(char *ciphertext) { static 
ARCH_WORD salt[(SALT_LENGTH + sizeof(ARCH_WORD) - 1) / sizeof(ARCH_WORD)]; _tobin((char*)salt, (char*)(ciphertext+2), sizeof(salt)); return salt; } static void set_salt(void *salt) { memcpy(global_salt, salt, SALT_LENGTH); } static void set_key(char *key, int index) { key_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key)) + 1; } static char* get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int i=0; #ifdef _OPENMP #pragma omp parallel for private(i) shared(global_salt, saved_key, key_len, crypt_out) #endif #if defined (_OPENMP) || MAX_KEYS_PER_CRYPT>1 for (i = 0; i < count; ++i) #endif { SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char*)global_salt, SALT_LENGTH-1); SHA1_Update(&ctx, saved_key[i], key_len[i]); SHA1_Final((unsigned char*)crypt_out[i], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if ( ((uint32_t*)binary)[0] == crypt_out[index][0] ) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_LENGTH); } static int cmp_exact(char *source, int index) { return 1; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static int salt_hash(void *salt) { return *(uint32_t*)salt & (SALT_HASH_SIZE - 1); } // Define john integration struct fmt_main fmt_EPI = { { // fmt_params "EPI", "EPiServer SID", "SHA1 32/" ARCH_BITS_STR, "", // benchmark comment 0, // benchmark length 0, PLAINTEXT_LENGTH, BINARY_LENGTH, BINARY_ALIGN, SALT_LENGTH, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { NULL }, global_tests }, { // fmt_methods init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
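// Hedged stand-alone sketch of the hash computed by crypt_all() above: SHA-1 over the first
// SALT_LENGTH-1 (29) bytes of the decoded salt, followed by the password including its
// terminating NUL byte (key_len counts the NUL). Requires OpenSSL; the salt/password pair is
// taken from the first entry of global_tests, and the inline hex decoder stands in for the
// plugin's _tobin()/atoi16 helpers.
#include <openssl/sha.h>
#include <cstdio>
#include <cstring>

static int hexval(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return 0;
}

int main()
{
    const char *salt_hex = "5F1D84A6DE97E2BEFB637A3CB5318AFEF0750B856CF1836BD1D4470175BE"; // 60 hex chars = 30 bytes
    const char *password = "Abc.!23";

    unsigned char salt[30];
    for (int i = 0; i < 30; ++i)
        salt[i] = (unsigned char)((hexval(salt_hex[2 * i]) << 4) | hexval(salt_hex[2 * i + 1]));

    SHA_CTX ctx;
    unsigned char digest[SHA_DIGEST_LENGTH];
    SHA1_Init(&ctx);
    SHA1_Update(&ctx, salt, 29);                              // SALT_LENGTH - 1 bytes of salt
    SHA1_Update(&ctx, password, std::strlen(password) + 1);   // password plus trailing NUL
    SHA1_Final(digest, &ctx);

    for (int i = 0; i < SHA_DIGEST_LENGTH; ++i)
        std::printf("%02X", digest[i]);
    std::printf("\n"); // if this mirrors crypt_all() correctly, it should match the 0x4D5EFDFA... field above
    return 0;
}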
GB_binop__pair_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pair_uint32 // A.*B function (eWiseMult): GB_AemultB__pair_uint32 // A*D function (colscale): GB_AxD__pair_uint32 // D*A function (rowscale): GB_DxB__pair_uint32 // C+=B function (dense accum): GB_Cdense_accumB__pair_uint32 // C+=b function (dense accum): GB_Cdense_accumb__pair_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_uint32 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar (none) // C=A'+scalar (none) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = 1 #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = 1 ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pair_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pair_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__pair_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__pair_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t 
*GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pair_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
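//------------------------------------------------------------------------------
// illustrative note (not part of the generated kernels above)
//------------------------------------------------------------------------------

// The PAIR operator used throughout this file ignores both of its inputs and
// always produces 1, which is why GB_GETA/GB_GETB expand to nothing.  A
// minimal scalar sketch of GB_BINOP, kept under #if 0 so it is never built:

#if 0
static inline uint32_t GB_pair_uint32_example (uint32_t x, uint32_t y)
{
    (void) x ;
    (void) y ;
    return (1) ;    // z = 1, independent of x and y
}
#endif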
sort.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "sort.h" #include "timer.h" #include "io.h" #include "thd_info.h" /****************************************************************************** * DEFINES *****************************************************************************/ /* switch to insertion sort past this point */ #define MIN_QUICKSORT_SIZE 8 /* don't bother spawning threads for small sorts */ #define SMALL_SORT_SIZE 1000 /****************************************************************************** * STATIC FUNCTIONS *****************************************************************************/ /** * @brief Compares ind*[i] and ind*[j] for two-mode tensors. * * @param ind0 The primary mode to compare. Defer tie-breaks to ind1. * @param ind1 The secondary mode to compare. * @param i The index into ind*. * @param j The second index into ind*. * * @return Returns -1 if ind[i] < ind[j], 1 if ind[i] > ind[j], and 0 if they * are equal. */ static inline int p_ttcmp2( idx_t const * const ind0, idx_t const * const ind1, idx_t const i, idx_t const j) { if(ind0[i] < ind0[j]) { return -1; } else if(ind0[j] < ind0[i]) { return 1; } if(ind1[i] < ind1[j]) { return -1; } else if(ind1[j] < ind1[i]) { return 1; } return 0; } /** * @brief Compares ind*[i] and j[*] for two-mode tensors. * * @param ind0 The primary mode to compare. Defer tie-breaks to ind1. * @param ind1 The secondary mode to compare. * @param i The index into ind*[] * @param j[2] The indices we are comparing i against. * * @return Returns -1 if ind[i] < j, 1 if ind[i] > j, and 0 if they are equal. */ static inline int p_ttqcmp2( idx_t const * const ind0, idx_t const * const ind1, idx_t const i, idx_t const j[2]) { if(ind0[i] < j[0]) { return -1; } else if(j[0] < ind0[i]) { return 1; } if(ind1[i] < j[1]) { return -1; } else if(j[1] < ind1[i]) { return 1; } return 0; } /** * @brief Perform insertion sort on a 2-mode tensor between start and end, * * @param tt The tensor to sort. * @param cmplt Mode permutation used for defining tie-breaking order. * @param start The first nonzero to sort. * @param end The last nonzero to sort. */ static void p_tt_insertionsort2( sptensor_t * const tt, idx_t const * const cmplt, idx_t const start, idx_t const end) { idx_t * const ind0 = tt->ind[cmplt[0]]; idx_t * const ind1 = tt->ind[cmplt[1]]; val_t * const vals = tt->vals; val_t vbuf; idx_t ibuf; for(size_t i=start+1; i < end; ++i) { size_t j = i; while (j > start && p_ttcmp2(ind0, ind1, i, j-1) < 0) { --j; } vbuf = vals[i]; /* shift all data */ memmove(vals+j+1, vals+j, (i-j)*sizeof(val_t)); vals[j] = vbuf; ibuf = ind0[i]; memmove(ind0+j+1, ind0+j, (i-j)*sizeof(idx_t)); ind0[j] = ibuf; ibuf = ind1[i]; memmove(ind1+j+1, ind1+j, (i-j)*sizeof(idx_t)); ind1[j] = ibuf; } } /** * @brief Perform quicksort on a 2-mode tensor between start and end. * * @param tt The tensor to sort. * @param cmplt Mode permutation used for defining tie-breaking order. * @param start The first nonzero to sort. * @param end The last nonzero to sort. 
*/ static void p_tt_quicksort2( sptensor_t * const tt, idx_t const * const cmplt, idx_t const start, idx_t const end) { val_t vmid; idx_t imid[2]; idx_t * const ind0 = tt->ind[cmplt[0]]; idx_t * const ind1 = tt->ind[cmplt[1]]; val_t * const vals = tt->vals; if((end-start) <= MIN_QUICKSORT_SIZE) { p_tt_insertionsort2(tt, cmplt, start, end); } else { size_t i = start+1; size_t j = end-1; size_t k = start + ((end - start) / 2); /* grab pivot */ vmid = vals[k]; vals[k] = vals[start]; imid[0] = ind0[k]; imid[1] = ind1[k]; ind0[k] = ind0[start]; ind1[k] = ind1[start]; while(i < j) { /* if tt[i] > mid -> tt[i] is on wrong side */ if(p_ttqcmp2(ind0,ind1,i,imid) == 1) { /* if tt[j] <= mid -> swap tt[i] and tt[j] */ if(p_ttqcmp2(ind0,ind1,j,imid) < 1) { val_t vtmp = vals[i]; vals[i] = vals[j]; vals[j] = vtmp; idx_t itmp = ind0[i]; ind0[i] = ind0[j]; ind0[j] = itmp; itmp = ind1[i]; ind1[i] = ind1[j]; ind1[j] = itmp; ++i; } --j; } else { /* if tt[j] > mid -> tt[j] is on right side */ if(p_ttqcmp2(ind0,ind1,j,imid) == 1) { --j; } ++i; } } /* if tt[i] > mid */ if(p_ttqcmp2(ind0,ind1,i,imid) == 1) { --i; } vals[start] = vals[i]; vals[i] = vmid; ind0[start] = ind0[i]; ind1[start] = ind1[i]; ind0[i] = imid[0]; ind1[i] = imid[1]; if(i > start + 1) { p_tt_quicksort2(tt, cmplt, start, i); } ++i; /* skip the pivot element */ if(end - i > 1) { p_tt_quicksort2(tt, cmplt, i, end); } } } /** * @brief Compares ind*[i] and j[*] for three-mode tensors. * * @param ind0 The primary mode to compare. Defer tie-breaks to ind1. * @param ind1 The secondary mode to compare. Defer tie-breaks to ind2. * @param ind2 The final tie-breaking mode. * @param i The index into ind*[] * @param j[3] The indices we are comparing i against. * * @return Returns -1 if ind[i] < j, 1 if ind[i] > j, and 0 if they are equal. */ static inline int p_ttqcmp3( idx_t const * const ind0, idx_t const * const ind1, idx_t const * const ind2, idx_t const i, idx_t const j[3]) { if(ind0[i] < j[0]) { return -1; } else if(j[0] < ind0[i]) { return 1; } if(ind1[i] < j[1]) { return -1; } else if(j[1] < ind1[i]) { return 1; } if(ind2[i] < j[2]) { return -1; } else if(j[2] < ind2[i]) { return 1; } return 0; } /** * @brief Compares ind*[i] and ind*[j] for three-mode tensors. * * @param ind0 The primary mode to compare. Defer tie-breaks to ind1. * @param ind1 The secondary mode to compare. Defer tie-breaks to ind2. * @param ind2 The final tie-breaking mode. * @param i The index into ind*. * @param j The second index into ind*. * * @return Returns -1 if ind[i] < ind[j], 1 if ind[i] > ind[j], and 0 if they * are equal. */ static inline int p_ttcmp3( idx_t const * const ind0, idx_t const * const ind1, idx_t const * const ind2, idx_t const i, idx_t const j) { if(ind0[i] < ind0[j]) { return -1; } else if(ind0[j] < ind0[i]) { return 1; } if(ind1[i] < ind1[j]) { return -1; } else if(ind1[j] < ind1[i]) { return 1; } if(ind2[i] < ind2[j]) { return -1; } else if(ind2[j] < ind2[i]) { return 1; } return 0; } /** * @brief Compares ind*[i] and ind*[j] for n-mode tensors. * * @param tt The tensor we are sorting. * @param cmplt Mode permutation used for defining tie-breaking order. * @param i The index into ind*. * @param j The second index into ind*. * * @return Returns -1 if ind[i] < ind[j], 1 if ind[i] > ind[j], and 0 if they * are equal. 
*/ static inline int p_ttcmp( sptensor_t const * const tt, idx_t const * const cmplt, idx_t const i, idx_t const j) { for(idx_t m=0; m < tt->nmodes; ++m) { if(tt->ind[cmplt[m]][i] < tt->ind[cmplt[m]][j]) { return -1; } else if(tt->ind[cmplt[m]][j] < tt->ind[cmplt[m]][i]) { return 1; } } return 0; } /** * @brief Compares ind*[i] and ind*[j] for n-mode tensors. * * @param tt The tensor we are sorting. * @param cmplt Mode permutation used for defining tie-breaking order. * @param i The index into ind*. * @param j The coordinate we are comparing against. * * @return Returns -1 if ind[i] < j, 1 if ind[i] > j, and 0 if they are equal. */ static inline int p_ttqcmp( sptensor_t const * const tt, idx_t const * const cmplt, idx_t const i, idx_t const j[MAX_NMODES]) { for(idx_t m=0; m < tt->nmodes; ++m) { if(tt->ind[cmplt[m]][i] < j[cmplt[m]]) { return -1; } else if(j[cmplt[m]] < tt->ind[cmplt[m]][i]) { return 1; } } return 0; } /** * @brief Swap nonzeros i and j. * * @param tt The tensor to operate on. * @param i The first nonzero to swap. * @param j The second nonzero to swap with. */ static inline void p_ttswap( sptensor_t * const tt, idx_t const i, idx_t const j) { val_t vtmp = tt->vals[i]; tt->vals[i] = tt->vals[j]; tt->vals[j] = vtmp; idx_t itmp; for(idx_t m=0; m < tt->nmodes; ++m) { itmp = tt->ind[m][i]; tt->ind[m][i] = tt->ind[m][j]; tt->ind[m][j] = itmp; } } /** * @brief Perform insertion sort on a 3-mode tensor between start and end. * * @param tt The tensor to sort. * @param cmplt Mode permutation used for defining tie-breaking order. * @param start The first nonzero to sort. * @param end The last nonzero to sort. */ static void p_tt_insertionsort3( sptensor_t * const tt, idx_t const * const cmplt, idx_t const start, idx_t const end) { idx_t * const ind0 = tt->ind[cmplt[0]]; idx_t * const ind1 = tt->ind[cmplt[1]]; idx_t * const ind2 = tt->ind[cmplt[2]]; val_t * const vals = tt->vals; val_t vbuf; idx_t ibuf; for(size_t i=start+1; i < end; ++i) { size_t j = i; while (j > start && p_ttcmp3(ind0, ind1, ind2, i, j-1) < 0) { --j; } vbuf = vals[i]; /* shift all data */ memmove(vals+j+1, vals+j, (i-j)*sizeof(val_t)); vals[j] = vbuf; ibuf = ind0[i]; memmove(ind0+j+1, ind0+j, (i-j)*sizeof(idx_t)); ind0[j] = ibuf; ibuf = ind1[i]; memmove(ind1+j+1, ind1+j, (i-j)*sizeof(idx_t)); ind1[j] = ibuf; ibuf = ind2[i]; memmove(ind2+j+1, ind2+j, (i-j)*sizeof(idx_t)); ind2[j] = ibuf; } } /** * @brief Perform insertion sort on an n-mode tensor between start and end. * * @param tt The tensor to sort. * @param cmplt Mode permutation used for defining tie-breaking order. * @param start The first nonzero to sort. * @param end The last nonzero to sort. */ static void p_tt_insertionsort( sptensor_t * const tt, idx_t const * const cmplt, idx_t const start, idx_t const end) { idx_t * ind; val_t * const vals = tt->vals; idx_t const nmodes = tt->nmodes; val_t vbuf; idx_t ibuf; for(size_t i=start+1; i < end; ++i) { size_t j = i; while (j > start && p_ttcmp(tt, cmplt, i, j-1) < 0) { --j; } vbuf = vals[i]; /* shift all data */ memmove(vals+j+1, vals+j, (i-j)*sizeof(val_t)); vals[j] = vbuf; for(idx_t m=0; m < nmodes; ++m) { ind = tt->ind[m]; ibuf = ind[i]; memmove(ind+j+1, ind+j, (i-j)*sizeof(idx_t)); ind[j] = ibuf; } } } /** * @brief Perform quicksort on a 3-mode tensor between start and end. * * @param tt The tensor to sort. * @param cmplt Mode permutation used for defining tie-breaking order. * @param start The first nonzero to sort. * @param end The last nonzero to sort. 
*/ static void p_tt_quicksort3( sptensor_t * const tt, idx_t const * const cmplt, idx_t const start, idx_t const end) { val_t vmid; idx_t imid[3]; idx_t * const ind0 = tt->ind[cmplt[0]]; idx_t * const ind1 = tt->ind[cmplt[1]]; idx_t * const ind2 = tt->ind[cmplt[2]]; val_t * const vals = tt->vals; if((end-start) <= MIN_QUICKSORT_SIZE) { p_tt_insertionsort3(tt, cmplt, start, end); } else { size_t i = start+1; size_t j = end-1; size_t k = start + ((end - start) / 2); /* grab pivot */ vmid = vals[k]; vals[k] = vals[start]; imid[0] = ind0[k]; imid[1] = ind1[k]; imid[2] = ind2[k]; ind0[k] = ind0[start]; ind1[k] = ind1[start]; ind2[k] = ind2[start]; while(i < j) { /* if tt[i] > mid -> tt[i] is on wrong side */ if(p_ttqcmp3(ind0,ind1,ind2,i,imid) == 1) { /* if tt[j] <= mid -> swap tt[i] and tt[j] */ if(p_ttqcmp3(ind0,ind1,ind2,j,imid) < 1) { val_t vtmp = vals[i]; vals[i] = vals[j]; vals[j] = vtmp; idx_t itmp = ind0[i]; ind0[i] = ind0[j]; ind0[j] = itmp; itmp = ind1[i]; ind1[i] = ind1[j]; ind1[j] = itmp; itmp = ind2[i]; ind2[i] = ind2[j]; ind2[j] = itmp; ++i; } --j; } else { /* if tt[j] > mid -> tt[j] is on right side */ if(p_ttqcmp3(ind0,ind1,ind2,j,imid) == 1) { --j; } ++i; } } /* if tt[i] > mid */ if(p_ttqcmp3(ind0,ind1,ind2,i,imid) == 1) { --i; } vals[start] = vals[i]; vals[i] = vmid; ind0[start] = ind0[i]; ind1[start] = ind1[i]; ind2[start] = ind2[i]; ind0[i] = imid[0]; ind1[i] = imid[1]; ind2[i] = imid[2]; if(i > start + 1) { p_tt_quicksort3(tt, cmplt, start, i); } ++i; /* skip the pivot element */ if(end - i > 1) { p_tt_quicksort3(tt, cmplt, i, end); } } } /** * @brief Perform quicksort on a n-mode tensor between start and end. * * @param tt The tensor to sort. * @param cmplt Mode permutation used for defining tie-breaking order. * @param start The first nonzero to sort. * @param end The last nonzero to sort. */ static void p_tt_quicksort( sptensor_t * const tt, idx_t const * const cmplt, idx_t const start, idx_t const end) { val_t vmid; idx_t imid[MAX_NMODES]; idx_t * ind; val_t * const vals = tt->vals; idx_t const nmodes = tt->nmodes; if((end-start) <= MIN_QUICKSORT_SIZE) { p_tt_insertionsort(tt, cmplt, start, end); } else { size_t i = start+1; size_t j = end-1; size_t k = start + ((end - start) / 2); /* grab pivot */ vmid = vals[k]; vals[k] = vals[start]; for(idx_t m=0; m < nmodes; ++m) { ind = tt->ind[m]; imid[m] = ind[k]; ind[k] = ind[start]; } while(i < j) { /* if tt[i] > mid -> tt[i] is on wrong side */ if(p_ttqcmp(tt,cmplt,i,imid) == 1) { /* if tt[j] <= mid -> swap tt[i] and tt[j] */ if(p_ttqcmp(tt,cmplt,j,imid) < 1) { p_ttswap(tt,i,j); ++i; } --j; } else { /* if tt[j] > mid -> tt[j] is on right side */ if(p_ttqcmp(tt,cmplt,j,imid) == 1) { --j; } ++i; } } /* if tt[i] > mid */ if(p_ttqcmp(tt,cmplt,i,imid) == 1) { --i; } vals[start] = vals[i]; vals[i] = vmid; for(idx_t m=0; m < nmodes; ++m) { ind = tt->ind[m]; ind[start] = ind[i]; ind[i] = imid[m]; } if(i > start + 1) { p_tt_quicksort(tt, cmplt, start, i); } ++i; /* skip the pivot element */ if(end - i > 1) { p_tt_quicksort(tt, cmplt, i, end); } } } /** * @brief Perform a simple parallel quicksort with OpenMP tasks. * * @param a The array to sort. * @param n The length of the array. 
*/ static void p_quicksort( idx_t * const a, idx_t const n) { if(n < MIN_QUICKSORT_SIZE) { insertion_sort(a, n); } else { size_t i = 1; size_t j = n-1; size_t k = n >> 1; idx_t mid = a[k]; a[k] = a[0]; while(i < j) { if(a[i] > mid) { /* a[i] is on the wrong side */ if(a[j] <= mid) { /* swap a[i] and a[j] */ idx_t tmp = a[i]; a[i] = a[j]; a[j] = tmp; ++i; } --j; } else { if(a[j] > mid) { /* a[j] is on the right side */ --j; } ++i; } } if(a[i] > mid) { --i; } a[0] = a[i]; a[i] = mid; if(i > 1) { p_quicksort(a,i); } ++i; /* skip the pivot element */ if(n-i > 1) { p_quicksort(a+i, n-i); } } } /** * idx = idx2*dim1 + idx1 * -> ret = idx1*dim2 + idx2 * = (idx%dim1)*dim2 + idx/dim1 */ static inline idx_t p_transpose_idx( idx_t const idx, idx_t const dim1, idx_t const dim2) { return idx%dim1*dim2 + idx/dim1; } /** * @brief Perform a counting sort on the most significant mode (cmplt[0]) and * then parallel quicksorts on each of slices. * * @param tt The tensor to sort. * @param cmplt Mode permutation used for defining tie-breaking order. */ static void p_counting_sort_hybrid( sptensor_t * const tt, idx_t * const cmplt) { idx_t m = cmplt[0]; idx_t nslices = tt->dims[m]; if(nslices == 0) return; idx_t * new_ind[MAX_NMODES]; for(idx_t i = 0; i < tt->nmodes; ++i) { if(i != m) { new_ind[i] = splatt_malloc(tt->nnz * sizeof(**new_ind)); } } val_t * new_vals = splatt_malloc(tt->nnz * sizeof(*new_vals)); idx_t * histogram_array = splatt_malloc( (nslices * splatt_omp_get_max_threads() + 1) * sizeof(*histogram_array)); #pragma omp parallel { int nthreads = splatt_omp_get_num_threads(); int tid = splatt_omp_get_thread_num(); idx_t * histogram = histogram_array + (nslices * tid); memset(histogram, 0, nslices * sizeof(idx_t)); idx_t j_per_thread = (tt->nnz + nthreads - 1)/nthreads; idx_t jbegin = SS_MIN(j_per_thread*tid, tt->nnz); idx_t jend = SS_MIN(jbegin + j_per_thread, tt->nnz); /* count */ for(idx_t j = jbegin; j < jend; ++j) { idx_t idx = tt->ind[m][j]; ++histogram[idx]; } #pragma omp barrier /* prefix sum */ for(idx_t j = (tid*nslices) + 1; j < (tid+1) * nslices; ++j) { idx_t transpose_j = p_transpose_idx(j, nthreads, nslices); idx_t transpose_j_minus_1 = p_transpose_idx(j - 1, nthreads, nslices); histogram_array[transpose_j] += histogram_array[transpose_j_minus_1]; } #pragma omp barrier #pragma omp master { for(int t = 1; t < nthreads; ++t) { idx_t j0 = (nslices*t) - 1, j1 = nslices * (t+1) - 1; idx_t transpose_j0 = p_transpose_idx(j0, nthreads, nslices); idx_t transpose_j1 = p_transpose_idx(j1, nthreads, nslices); histogram_array[transpose_j1] += histogram_array[transpose_j0]; } } #pragma omp barrier if (tid > 0) { idx_t transpose_j0 = p_transpose_idx(nslices*tid - 1, nthreads, nslices); for(idx_t j = tid*nslices; j < (tid+1) * nslices - 1; ++j) { idx_t transpose_j = p_transpose_idx(j, nthreads, nslices); histogram_array[transpose_j] += histogram_array[transpose_j0]; } } #pragma omp barrier /* now copy values into new structures (but not the mode we are sorting */ for(idx_t j_off = 0; j_off < (jend-jbegin); ++j_off) { /* we are actually going backwards */ idx_t const j = jend - j_off - 1; idx_t idx = tt->ind[m][j]; --histogram[idx]; idx_t offset = histogram[idx]; new_vals[offset] = tt->vals[j]; for(idx_t mode=0; mode < tt->nmodes; ++mode) { if(mode != m) { new_ind[mode][offset] = tt->ind[mode][j]; } } } } /* omp parallel */ for(idx_t i = 0; i < tt->nmodes; ++i) { if(i != m) { splatt_free(tt->ind[i]); tt->ind[i] = new_ind[i]; } } splatt_free(tt->vals); tt->vals = new_vals; histogram_array[nslices] = 
tt->nnz; /* for 3/4D, we can use quicksort on only the leftover modes */ if(tt->nmodes == 3) { #pragma omp parallel for schedule(dynamic) for(idx_t i = 0; i < nslices; ++i) { p_tt_quicksort2(tt, cmplt+1, histogram_array[i], histogram_array[i + 1]); for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) { tt->ind[m][j] = i; } } } else if(tt->nmodes == 4) { #pragma omp parallel for schedule(dynamic) for(idx_t i = 0; i < nslices; ++i) { p_tt_quicksort3(tt, cmplt+1, histogram_array[i], histogram_array[i + 1]); for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) { tt->ind[m][j] = i; } } } else { /* shift cmplt left one time, then do normal quicksort */ idx_t saved = cmplt[0]; memmove(cmplt, cmplt+1, (tt->nmodes - 1) * sizeof(*cmplt)); cmplt[tt->nmodes-1] = saved; #pragma omp parallel for schedule(dynamic) for(idx_t i = 0; i < nslices; ++i) { p_tt_quicksort(tt, cmplt, histogram_array[i], histogram_array[i + 1]); for(idx_t j = histogram_array[i]; j < histogram_array[i + 1]; ++j) { tt->ind[m][j] = i; } } /* undo cmplt changes */ saved = cmplt[tt->nmodes-1]; memmove(cmplt+1, cmplt, (tt->nmodes - 1) * sizeof(*cmplt)); cmplt[0] = saved; } splatt_free(histogram_array); } /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ void tt_sort( sptensor_t * const tt, idx_t const mode, idx_t * dim_perm) { tt_sort_range(tt, mode, dim_perm, 0, tt->nnz); } void tt_sort_range( sptensor_t * const tt, idx_t const mode, idx_t * dim_perm, idx_t const start, idx_t const end) { if(end <= start) { return; } idx_t * cmplt; if(dim_perm == NULL) { cmplt = (idx_t*) splatt_malloc(tt->nmodes * sizeof(idx_t)); cmplt[0] = mode; for(idx_t m=1; m < tt->nmodes; ++m) { cmplt[m] = (mode + m) % tt->nmodes; } } else { cmplt = dim_perm; } timer_start(&timers[TIMER_SORT]); if(start == 0 && end == tt->nnz) { p_counting_sort_hybrid(tt, cmplt); /* sort a subtensor */ } else { switch(tt->type) { case SPLATT_NMODE: p_tt_quicksort(tt, cmplt, start, end); break; case SPLATT_3MODE: p_tt_quicksort3(tt, cmplt, start, end); break; } } if(dim_perm == NULL) { free(cmplt); } timer_stop(&timers[TIMER_SORT]); } void insertion_sort( idx_t * const a, idx_t const n) { timer_start(&timers[TIMER_SORT]); for(size_t i=1; i < n; ++i) { idx_t b = a[i]; size_t j = i; while (j > 0 && a[j-1] > b) { --j; } memmove(a+(j+1), a+j, sizeof(idx_t)*(i-j)); a[j] = b; } timer_stop(&timers[TIMER_SORT]); } void quicksort( idx_t * const a, idx_t const n) { timer_start(&timers[TIMER_SORT]); p_quicksort(a,n); timer_stop(&timers[TIMER_SORT]); }
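/**
* @brief Illustrative example (not part of the original sort routines).
*
* p_counting_sort_hybrid() stores the per-thread histograms thread-major as
* histogram_array[tid*nslices + slice], but its prefix sum walks the counts in
* slice-major order.  p_transpose_idx(idx, nthreads, nslices) maps the
* slice-major logical index slice*nthreads + tid to the physical location
* tid*nslices + slice.  A tiny self-check, assuming 3 threads and 4 slices;
* kept under #if 0 so it is never built.
*/
#if 0
#include <assert.h>
static void p_transpose_idx_example(void)
{
  /* logical (slice=2, tid=1) -> physical (tid=1, slice=2) */
  assert(p_transpose_idx(2*3 + 1, 3, 4) == 1*4 + 2);
}
#endif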
GB_binop__cmplx_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__cmplx_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__cmplx_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__cmplx_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__cmplx_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__cmplx_fp64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__cmplx_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__cmplx_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__cmplx_fp64) // C=scalar+B GB (_bind1st__cmplx_fp64) // C=scalar+B' GB (_bind1st_tran__cmplx_fp64) // C=A+scalar GB (_bind2nd__cmplx_fp64) // C=A'+scalar GB (_bind2nd_tran__cmplx_fp64) // C type: GxB_FC64_t // A type: double // B,b type: double // BinaryOp: cij = GxB_CMPLX (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GxB_CMPLX (GBX (Ax, pA, A_iso), 0) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GxB_CMPLX (GBX (Bx, pB, B_iso), 0) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GxB_CMPLX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CMPLX || GxB_NO_FP64 || GxB_NO_CMPLX_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__cmplx_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__cmplx_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include 
"GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__cmplx_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__cmplx_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__cmplx_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = GxB_CMPLX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__cmplx_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = GxB_CMPLX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = GxB_CMPLX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__cmplx_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, 
int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = GxB_CMPLX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__cmplx_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
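//------------------------------------------------------------------------------
// illustrative note (not part of the generated kernels above)
//------------------------------------------------------------------------------

// The CMPLX binary operator in this file combines two real double inputs into
// one GxB_FC64_t (double complex) result, with x supplying the real part and
// y the imaginary part; see GB_BINOP and the GB_COPY_*_TO_C macros, which pass
// 0 for the imaginary part.  A minimal scalar sketch of that semantics,
// assuming C99 <complex.h>; kept under #if 0 so it is never built:

#if 0
#include <complex.h>
static inline double complex GB_cmplx_fp64_example (double x, double y)
{
    return (x + y * I) ;    // same value as GxB_CMPLX (x, y)
}
#endif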
blackscholes.simd.c
// Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifndef WIN32 #include <pmmintrin.h> #else #include <xmmintrin.h> #endif #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif // Multi-threaded pthreads header #ifdef ENABLE_THREADS #define MAX_THREADS 128 // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp MAIN_ENV #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_OPENMP #include <omp.h> #endif // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #define MAX_THREADS 128 #endif #ifdef __GNUC__ #define _MM_ALIGN16 __attribute__((aligned (16))) #define MUSTINLINE __attribute__((always_inline)) #else #define MUSTINLINE __forceinline #endif // NCO = Number of Concurrent Options = SIMD Width // NCO is currently set in the Makefile. #ifndef NCO #error NCO must be defined. #endif #if (NCO==2) #define fptype double #define SIMD_WIDTH 2 #define _MMR __m128d #define _MM_LOAD _mm_load_pd #define _MM_STORE _mm_store_pd #define _MM_MUL _mm_mul_pd #define _MM_ADD _mm_add_pd #define _MM_SUB _mm_sub_pd #define _MM_DIV _mm_div_pd #define _MM_SQRT _mm_sqrt_pd #define _MM_SET(A) _mm_set_pd(A,A) #define _MM_SETR _mm_set_pd #endif #if (NCO==4) #define fptype float #define SIMD_WIDTH 4 #define _MMR __m128 #define _MM_LOAD _mm_load_ps #define _MM_STORE _mm_store_ps #define _MM_MUL _mm_mul_ps #define _MM_ADD _mm_add_ps #define _MM_SUB _mm_sub_ps #define _MM_DIV _mm_div_ps #define _MM_SQRT _mm_sqrt_ps #define _MM_SET(A) _mm_set_ps(A,A,A,A) #define _MM_SETR _mm_set_ps #endif #define NUM_RUNS 100 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. 
"P"=PUT, "C"=CALL fptype divs; // dividend vals (not used in this test) fptype DGrefval; // DerivaGem Reference Value } OptionData; _MM_ALIGN16 OptionData* data; _MM_ALIGN16 fptype* prices; int numOptions; int * otype; fptype * sptprice; fptype * strike; fptype * rate; fptype * volatility; fptype * otime; int numError = 0; int nThreads; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Cumulative Normal Distribution Function // See Hull, Section 11.8, P.243-244 #define inv_sqrt_2xPI 0.39894228040143270286 MUSTINLINE void CNDF ( fptype * OutputX, fptype * InputX ) { int sign[SIMD_WIDTH]; int i; _MMR xInput; _MMR xNPrimeofX; _MM_ALIGN16 fptype expValues[SIMD_WIDTH]; _MMR xK2; _MMR xK2_2, xK2_3, xK2_4, xK2_5; _MMR xLocal, xLocal_1, xLocal_2, xLocal_3; for (i=0; i<SIMD_WIDTH; i++) { // Check for negative value of InputX if (InputX[i] < 0.0) { InputX[i] = -InputX[i]; sign[i] = 1; } else sign[i] = 0; } // printf("InputX[0]=%lf\n", InputX[0]); // printf("InputX[1]=%lf\n", InputX[1]); xInput = _MM_LOAD(InputX); // local vars // Compute NPrimeX term common to both four & six decimal accuracy calcs for (i=0; i<SIMD_WIDTH; i++) { expValues[i] = exp(-0.5f * InputX[i] * InputX[i]); // printf("exp[%d]: %f\n", i, expValues[i]); } xNPrimeofX = _MM_LOAD(expValues); xNPrimeofX = _MM_MUL(xNPrimeofX, _MM_SET(inv_sqrt_2xPI)); xK2 = _MM_MUL(_MM_SET(0.2316419), xInput); xK2 = _MM_ADD(xK2, _MM_SET(1.0)); xK2 = _MM_DIV(_MM_SET(1.0), xK2); // xK2 = _mm_rcp_pd(xK2); // No rcp function for double-precision xK2_2 = _MM_MUL(xK2, xK2); xK2_3 = _MM_MUL(xK2_2, xK2); xK2_4 = _MM_MUL(xK2_3, xK2); xK2_5 = _MM_MUL(xK2_4, xK2); xLocal_1 = _MM_MUL(xK2, _MM_SET(0.319381530)); xLocal_2 = _MM_MUL(xK2_2, _MM_SET(-0.356563782)); xLocal_3 = _MM_MUL(xK2_3, _MM_SET(1.781477937)); xLocal_2 = _MM_ADD(xLocal_2, xLocal_3); xLocal_3 = _MM_MUL(xK2_4, _MM_SET(-1.821255978)); xLocal_2 = _MM_ADD(xLocal_2, xLocal_3); xLocal_3 = _MM_MUL(xK2_5, _MM_SET(1.330274429)); xLocal_2 = _MM_ADD(xLocal_2, xLocal_3); xLocal_1 = _MM_ADD(xLocal_2, xLocal_1); xLocal = _MM_MUL(xLocal_1, xNPrimeofX); xLocal = _MM_SUB(_MM_SET(1.0), xLocal); _MM_STORE(OutputX, xLocal); // _mm_storel_pd(&OutputX[0], xLocal); // _mm_storeh_pd(&OutputX[1], xLocal); for (i=0; i<SIMD_WIDTH; i++) { if (sign[i]) { OutputX[i] = (1.0 - OutputX[i]); } } } // For debugging void print_xmm(_MMR in, char* s) { int i; _MM_ALIGN16 fptype val[SIMD_WIDTH]; _MM_STORE(val, in); printf("%s: ", s); for (i=0; i<SIMD_WIDTH; i++) { printf("%f ", val[i]); } printf("\n"); } ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// void BlkSchlsEqEuroNoDiv (fptype * OptionPrice, int numOptions, fptype * sptprice, fptype * strike, fptype * rate, fptype * volatility, fptype * time, int * otype, float timet) { int i; // local private working variables for the calculation _MMR xStockPrice; _MMR xStrikePrice; _MMR xRiskFreeRate; _MMR xVolatility; _MMR xTime; _MMR xSqrtTime; _MM_ALIGN16 fptype logValues[NCO]; _MMR xLogTerm; _MMR xD1, xD2; _MMR xPowerTerm; _MMR 
xDen; _MM_ALIGN16 fptype d1[SIMD_WIDTH]; _MM_ALIGN16 fptype d2[SIMD_WIDTH]; _MM_ALIGN16 fptype FutureValueX[SIMD_WIDTH]; _MM_ALIGN16 fptype NofXd1[SIMD_WIDTH]; _MM_ALIGN16 fptype NofXd2[SIMD_WIDTH]; _MM_ALIGN16 fptype NegNofXd1[SIMD_WIDTH]; _MM_ALIGN16 fptype NegNofXd2[SIMD_WIDTH]; xStockPrice = _MM_LOAD(sptprice); xStrikePrice = _MM_LOAD(strike); xRiskFreeRate = _MM_LOAD(rate); xVolatility = _MM_LOAD(volatility); xTime = _MM_LOAD(time); xSqrtTime = _MM_SQRT(xTime); for(i=0; i<SIMD_WIDTH;i ++) { logValues[i] = log(sptprice[i] / strike[i]); } xLogTerm = _MM_LOAD(logValues); xPowerTerm = _MM_MUL(xVolatility, xVolatility); xPowerTerm = _MM_MUL(xPowerTerm, _MM_SET(0.5)); xD1 = _MM_ADD(xRiskFreeRate, xPowerTerm); xD1 = _MM_MUL(xD1, xTime); xD1 = _MM_ADD(xD1, xLogTerm); xDen = _MM_MUL(xVolatility, xSqrtTime); xD1 = _MM_DIV(xD1, xDen); xD2 = _MM_SUB(xD1, xDen); _MM_STORE(d1, xD1); _MM_STORE(d2, xD2); CNDF( NofXd1, d1 ); CNDF( NofXd2, d2 ); for (i=0; i<SIMD_WIDTH; i++) { FutureValueX[i] = strike[i] * (exp(-(rate[i])*(time[i]))); // printf("FV=%lf\n", FutureValueX[i]); // NofXd1[i] = NofX(d1[i]); // NofXd2[i] = NofX(d2[i]); // printf("NofXd1=%lf\n", NofXd1[i]); // printf("NofXd2=%lf\n", NofXd2[i]); if (otype[i] == 0) { OptionPrice[i] = (sptprice[i] * NofXd1[i]) - (FutureValueX[i] * NofXd2[i]); } else { NegNofXd1[i] = (1.0 - (NofXd1[i])); NegNofXd2[i] = (1.0 - (NofXd2[i])); OptionPrice[i] = (FutureValueX[i] * NegNofXd2[i]) - (sptprice[i] * NegNofXd1[i]); } // printf("OptionPrice[0] = %lf\n", OptionPrice[i]); } } ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// #ifdef WIN32 DWORD WINAPI bs_thread(LPVOID tid_ptr){ #else int bs_thread(void *tid_ptr) { #endif int i, j, k; fptype price[NCO]; fptype priceDelta; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); for (j=0; j<NUM_RUNS; j++) { #ifdef ENABLE_OPENMP #pragma omp parallel for for (i=0; i<numOptions; i += NCO) { #else //ENABLE_OPENMP for (i=start; i<end; i += NCO) { #endif //ENABLE_OPENMP // Calling main function to calculate option value based on Black & Scholes's // equation. BlkSchlsEqEuroNoDiv(price, NCO, &(sptprice[i]), &(strike[i]), &(rate[i]), &(volatility[i]), &(otime[i]), &(otype[i]), 0); for (k=0; k<NCO; k++) { prices[i+k] = price[k]; } #ifdef ERR_CHK // Error checking. for (k=0; k<NCO; k++) { priceDelta = data[i+k].DGrefval - price[k]; if (fabs(priceDelta) >= 1e-4) { printf("Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i + k, price[k], data[i+k].DGrefval, priceDelta); numError ++; } } #endif } } return 0; } int main (int argc, char **argv) { FILE *file; int i; int loopnum; fptype * buffer; int * buffer2; int rv; #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif //PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_blackscholes); #endif if (argc != 4) { printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]); exit(1); } nThreads = atoi(argv[1]); char *inputFile = argv[2]; char *outputFile = argv[3]; //Read input data from file file = fopen(inputFile, "r"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", inputFile); exit(1); } rv = fscanf(file, "%i", &numOptions); if(rv != 1) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } if(NCO > numOptions) { printf("ERROR: Not enough work for SIMD operation.\n"); fclose(file); exit(1); } if(nThreads > numOptions/NCO) { printf("WARNING: Not enough work, reducing number of threads to match number of SIMD options packets.\n"); nThreads = numOptions/NCO; } #if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) if(nThreads != 1) { printf("Error: <nthreads> must be 1 (serial version)\n"); exit(1); } #endif data = (OptionData*)malloc(numOptions*sizeof(OptionData)); prices = (fptype*)malloc(numOptions*sizeof(fptype)); for ( loopnum = 0; loopnum < numOptions; ++ loopnum ) { rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval); if(rv != 9) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", inputFile); exit(1); } #ifdef ENABLE_THREADS MAIN_INITENV(,8000000,nThreads); #endif printf("Num of Options: %d\n", numOptions); printf("Num of Runs: %d\n", NUM_RUNS); #define PAD 256 #define LINESIZE 64 buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD); sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1)); strike = sptprice + numOptions; rate = strike + numOptions; volatility = rate + numOptions; otime = volatility + numOptions; buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD); otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1)); for (i=0; i<numOptions; i++) { otype[i] = (data[i].OptionType == 'P') ? 
1 : 0; sptprice[i] = data[i].s; strike[i] = data[i].strike; rate[i] = data[i].r; volatility[i] = data[i].v; otime[i] = data[i].t; } printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int))); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef ENABLE_THREADS int tids[nThreads]; for(i=0; i<nThreads; i++) { tids[i]=i; CREATE_WITH_ARG(bs_thread, &tids[i]); } WAIT_FOR_END(nThreads); #else//ENABLE_THREADS #ifdef ENABLE_OPENMP { int tid=0; omp_set_num_threads(nThreads); bs_thread(&tid); } #else //ENABLE_OPENMP #ifdef WIN32 if (nThreads > 1) { HANDLE threads[MAX_THREADS]; int nums[MAX_THREADS]; for(i=0; i<nThreads; i++) { nums[i] = i; threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); } else #endif { int tid=0; bs_thread(&tid); } #endif //ENABLE_OPENMP #endif //ENABLE_THREADS #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif //Write prices to output file file = fopen(outputFile, "w"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", outputFile); exit(1); } rv = fprintf(file, "%i\n", numOptions); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } for(i=0; i<numOptions; i++) { rv = fprintf(file, "%.18f\n", prices[i]); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", outputFile); exit(1); } #ifdef ERR_CHK printf("Num Errors: %d\n", numError); #endif free(data); free(prices); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif return 0; }
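//////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original benchmark): the scalar form of the
// vectorized CNDF() above, i.e. the polynomial approximation of the cumulative
// normal distribution used by BlkSchlsEqEuroNoDiv().  The constants match the SIMD
// code; kept under #if 0 so it is never built.
//////////////////////////////////////////////////////////////////////////////////////
#if 0
static double CNDF_scalar(double x) {
    int negative = (x < 0.0);
    if (negative) x = -x;

    // N'(x) = exp(-x^2/2) / sqrt(2*pi)
    double nprime = inv_sqrt_2xPI * exp(-0.5 * x * x);
    double k  = 1.0 / (1.0 + 0.2316419 * x);
    double k2 = k * k, k3 = k2 * k, k4 = k3 * k, k5 = k4 * k;
    double poly = 0.319381530 * k
                - 0.356563782 * k2
                + 1.781477937 * k3
                - 1.821255978 * k4
                + 1.330274429 * k5;
    double n = 1.0 - nprime * poly;
    return negative ? (1.0 - n) : n;   // N(-x) = 1 - N(x)
}
#endif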
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ 
for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
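The benchmark above runs the 7-point update serially (it only queries omp_get_max_threads() for reporting). Below is a minimal sketch of how the same spatial sweep could be threaded with OpenMP; the names A, alpha, beta and the Nx/Ny/Nz extents are assumed to match the allocation in main() above, and stencil_step_omp is an illustrative helper, not part of the original benchmark.

/* Sketch only: the same order-1 7-point update as in the benchmark above,
 * threaded over the outermost spatial loop.  A, alpha, beta, Nx, Ny, Nz are
 * assumed to be allocated and initialized exactly as in main(). */
static void stencil_step_omp(double ****A, int t, int Nx, int Ny, int Nz,
                             double alpha, double beta)
{
    int i, j, k;
#pragma omp parallel for private(j, k) schedule(static)
    for (i = 1; i < Nz - 1; i++) {
        for (j = 1; j < Ny - 1; j++) {
            for (k = 1; k < Nx - 1; k++) {
                A[(t + 1) % 2][i][j][k] =
                    alpha * A[t % 2][i][j][k]
                  + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i][j - 1][k]
                          + A[t % 2][i][j][k - 1] + A[t % 2][i + 1][j][k]
                          + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k + 1]);
            }
        }
    }
}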
idasFoodWeb_kry_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDAS: Food web problem, OpenMP, GMRES, * user-supplied preconditioner * * This example program uses SUNLinSol_SPGMR as the linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDAS using the SUNLinSol_SPGMR linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idasFoodWeb_kry_omp * To specify the number of threads at the command line, use * % ./idasFoodWeb_kry_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. 
Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <idas/idas.h> #include <sunlinsol/sunlinsol_spgmr.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_dense.h> #include <sundials/sundials_types.h> #include <sundials/sundials_math.h> #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to * species index is = 0, x-index ix = i, and y-index jy = j. */ #define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX)) /* Type: UserData. Contains problem constants, etc. */ typedef struct { sunindextype Neq, ns, np, mx, my; realtype dx, dy, **acoef; realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; realtype **PP[MX][MY]; sunindextype *pivot[MX][MY]; N_Vector rates; N_Vector ewt; void *ida_mem; int nthreads; } *UserData; /* Prototypes for functions called by the IDA Solver. */ static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval, void *user_data); static int Precond(realtype tt, N_Vector cc, N_Vector cp, N_Vector rr, realtype cj, void *user_data); static int PSolve(realtype tt, N_Vector cc, N_Vector cp, N_Vector rr, N_Vector rvec, N_Vector zvec, realtype cj, realtype delta, void *user_data); /* Prototypes for private Helper Functions. 
*/ static void InitUserData(UserData webdata); static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata); static void PrintHeader(sunindextype maxl, realtype rtol, realtype atol); static void PrintOutput(void *ida_mem, N_Vector c, realtype t); static void PrintFinalStats(void *ida_mem); static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata); static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata); static realtype dotprod(sunindextype size, realtype *x1, realtype *x2); static int check_retval(void *returnvalue, char *funcname, int opt); /* *-------------------------------------------------------------------- * MAIN PROGRAM *-------------------------------------------------------------------- */ int main(int argc, char *argv[]) { void *ida_mem; SUNLinearSolver LS; UserData webdata; N_Vector cc, cp, id; int iout, jx, jy, retval; sunindextype maxl; realtype rtol, atol, t0, tout, tret; int num_threads; ida_mem = NULL; LS = NULL; webdata = NULL; cc = cp = id = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS */ #endif if (argc > 1) num_threads = strtol(argv[1], NULL, 0); /* Allocate and initialize user data block webdata. */ webdata = (UserData) malloc(sizeof *webdata); webdata->rates = N_VNew_OpenMP(NEQ, num_threads); webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES); webdata->ewt = N_VNew_OpenMP(NEQ, num_threads); for (jx = 0; jx < MX; jx++) { for (jy = 0; jy < MY; jy++) { (webdata->pivot)[jx][jy] = newIndexArray(NUM_SPECIES); (webdata->PP)[jx][jy] = newDenseMat(NUM_SPECIES, NUM_SPECIES); } } webdata->nthreads = num_threads; InitUserData(webdata); /* Allocate N-vectors and initialize cc, cp, and id. */ cc = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1); cp = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1); id = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1); SetInitialProfiles(cc, cp, id, webdata); /* Set remaining inputs to IDAMalloc. */ t0 = ZERO; rtol = RTOL; atol = ATOL; /* Call IDACreate and IDAMalloc to initialize IDA. */ ida_mem = IDACreate(); if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1); retval = IDASetUserData(ida_mem, webdata); if(check_retval(&retval, "IDASetUserData", 1)) return(1); retval = IDASetId(ida_mem, id); if(check_retval(&retval, "IDASetId", 1)) return(1); retval = IDAInit(ida_mem, resweb, t0, cc, cp); if(check_retval(&retval, "IDAInit", 1)) return(1); retval = IDASStolerances(ida_mem, rtol, atol); if(check_retval(&retval, "IDASStolerances", 1)) return(1); webdata->ida_mem = ida_mem; /* Create SUNLinSol_SPGMR linear solver, attach to IDA, and set preconditioning routines. */ maxl = 16; /* max dimension of the Krylov subspace */ LS = SUNLinSol_SPGMR(cc, PREC_LEFT, maxl); /* IDA only allows left preconditioning */ if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0)) return(1); retval = IDASetLinearSolver(ida_mem, LS, NULL); if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1); retval = IDASetPreconditioner(ida_mem, Precond, PSolve); if(check_retval(&retval, "IDASetPreconditioner", 1)) return(1); /* Call IDACalcIC (with default options) to correct the initial values. 
*/ tout = RCONST(0.001); retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout); if(check_retval(&retval, "IDACalcIC", 1)) return(1); /* Print heading, basic parameters, and initial values. */ PrintHeader(maxl, rtol, atol); PrintOutput(ida_mem, cc, ZERO); /* Loop over iout, call IDASolve (normal mode), print selected output. */ for (iout = 1; iout <= NOUT; iout++) { retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL); if(check_retval(&retval, "IDASolve", 1)) return(retval); PrintOutput(ida_mem, cc, tret); if (iout < 3) tout *= TMULT; else tout += TADD; } /* Print final statistics and free memory. */ PrintFinalStats(ida_mem); printf("num_threads = %i\n\n", num_threads); /* Free memory */ IDAFree(&ida_mem); SUNLinSolFree(LS); N_VDestroy_OpenMP(cc); N_VDestroy_OpenMP(cp); N_VDestroy_OpenMP(id); destroyMat(webdata->acoef); N_VDestroy_OpenMP(webdata->rates); N_VDestroy_OpenMP(webdata->ewt); for (jx = 0; jx < MX; jx++) { for (jy = 0; jy < MY; jy ++) { destroyArray((webdata->pivot)[jx][jy]); destroyMat((webdata->PP)[jx][jy]); } } free(webdata); return(0); } /* Define lines for readability in later routines */ #define acoef (webdata->acoef) #define bcoef (webdata->bcoef) #define cox (webdata->cox) #define coy (webdata->coy) /* *-------------------------------------------------------------------- * FUNCTIONS CALLED BY IDA *-------------------------------------------------------------------- */ /* * resweb: System residual function for predator-prey system. * This routine calls Fweb to get all the right-hand sides of the * equations, then loads the residual vector accordingly, * using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; webdata = (UserData)user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* Loop over all grid points, setting residual values appropriately for differential or algebraic components. 
*/ #pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) schedule(static) num_threads(webdata->nthreads) for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) resv[loc+is] = cpv[loc+is] - resv[loc+is]; else resv[loc+is] = -resv[loc+is]; } } } return(0); } static int Precond(realtype tt, N_Vector cc, N_Vector cp, N_Vector rr, realtype cj, void *user_data) { int retval; realtype uround, xx, yy, del_x, del_y; realtype **Pxy, *ratesxy, *Pxycol, *cxy, *cpxy, *ewtxy, cctmp; realtype inc, fac, sqru, perturb_rates[NUM_SPECIES]; int is, js, jx, jy, ret; void *ida_mem; N_Vector ewt; realtype hh; UserData webdata; webdata = (UserData) user_data; del_x = webdata->dx; del_y = webdata->dy; uround = UNIT_ROUNDOFF; sqru = SUNRsqrt(uround); ida_mem = webdata->ida_mem; ewt = webdata->ewt; retval = IDAGetErrWeights(ida_mem, ewt); if(check_retval(&retval, "IDAGetErrWeights", 1)) return(1); retval = IDAGetCurrentStep(ida_mem, &hh); if(check_retval(&retval, "IDAGetCurrentStep", 1)) return(1); for (jy = 0; jy < MY; jy++) { yy = jy * del_y; for (jx = 0; jx < MX; jx++) { xx = jx * del_x; Pxy = (webdata->PP)[jx][jy]; cxy = IJ_Vptr(cc, jx, jy); cpxy = IJ_Vptr(cp, jx, jy); ewtxy = IJ_Vptr(ewt, jx, jy); ratesxy = IJ_Vptr((webdata->rates), jx, jy); for (js = 0; js < NUM_SPECIES; js++) { inc = sqru*(SUNMAX(SUNRabs(cxy[js]), SUNMAX(hh*SUNRabs(cpxy[js]), ONE/ewtxy[js]))); cctmp = cxy[js]; cxy[js] += inc; fac = -ONE/inc; WebRates(xx, yy, cxy, perturb_rates, webdata); Pxycol = Pxy[js]; for (is = 0; is < NUM_SPECIES; is++) Pxycol[is] = (perturb_rates[is] - ratesxy[is])*fac; if (js < 1) Pxycol[js] += cj; cxy[js] = cctmp; } ret = denseGETRF(Pxy, NUM_SPECIES, NUM_SPECIES, (webdata->pivot)[jx][jy]); if (ret != 0) return(1); } } return(0); } static int PSolve(realtype tt, N_Vector cc, N_Vector cp, N_Vector rr, N_Vector rvec, N_Vector zvec, realtype cj, realtype dalta, void *user_data) { realtype **Pxy, *zxy; sunindextype *pivot; int jx, jy; UserData webdata; webdata = (UserData) user_data; N_VScale(ONE, rvec, zvec); #pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, pivot) schedule(static) num_threads(webdata->nthreads) for (jx = 0; jx < MX; jx++) { for (jy = 0; jy <MY; jy++) { zxy = IJ_Vptr(zvec, jx, jy); Pxy = (webdata->PP)[jx][jy]; pivot = (webdata->pivot)[jx][jy]; denseGETRS(Pxy, NUM_SPECIES, pivot, zxy); } } return(0); } /* *-------------------------------------------------------------------- * PRIVATE FUNCTIONS *-------------------------------------------------------------------- */ /* * InitUserData: Load problem constants in webdata (of type UserData). */ static void InitUserData(UserData webdata) { int i, j, np; realtype *a1,*a2, *a3, *a4, dx2, dy2; webdata->mx = MX; webdata->my = MY; webdata->ns = NUM_SPECIES; webdata->np = NPREY; webdata->dx = AX/(MX-1); webdata->dy = AY/(MY-1); webdata->Neq= NEQ; /* Set up the coefficients a and b, and others found in the equations. */ np = webdata->np; dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy); for (i = 0; i < np; i++) { a1 = &(acoef[i][np]); a2 = &(acoef[i+np][0]); a3 = &(acoef[i][0]); a4 = &(acoef[i+np][np]); /* Fill in the portion of acoef in the four quadrants, row by row. */ for (j = 0; j < np; j++) { *a1++ = -GG; *a2++ = EE; *a3++ = ZERO; *a4++ = ZERO; } /* Reset the diagonal elements of acoef to -AA. */ acoef[i][i] = -AA; acoef[i+np][i+np] = -AA; /* Set coefficients for b and diffusion terms. 
*/ bcoef[i] = BB; bcoef[i+np] = -BB; cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2; coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2; } } /* * SetInitialProfiles: Set initial conditions in cc, cp, and id. * A polynomial profile is used for the prey cc values, and a constant * (1.0e5) is loaded as the initial guess for the predator cc values. * The id values are set to 1 for the prey and 0 for the predators. * The prey cp values are set according to the given system, and * the predator cp values are set to zero. */ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES*jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor; idv[loc+is] = ONE; } else { ccv[loc+is] = RCONST(1.0e5); idv[loc+is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc+is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(sunindextype maxl, realtype rtol, realtype atol) { printf("\nidasFoodWeb_kry_omp: Predator-prey DAE OpenMP example problem using Krylov solver for IDAS \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNLinSol_SPGMR, maxl = %ld\n",(long int) maxl); printf("CalcIC called to correct initial predator concentrations.\n\n"); printf("-----------------------------------------------------------\n"); printf(" t bottom-left top-right"); printf(" | nst k h\n"); printf("-----------------------------------------------------------\n\n"); } /* * PrintOutput: Print output values at output time t = tt. * Selected run statistics are printed. Then values of the concentrations * are printed for the bottom left and top right grid points only. 
*/ static void PrintOutput(void *ida_mem, N_Vector c, realtype t) { int i, kused, retval; long int nst; realtype *c_bl, *c_tr, hused; retval = IDAGetLastOrder(ida_mem, &kused); check_retval(&retval, "IDAGetLastOrder", 1); retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetLastStep(ida_mem, &hused); check_retval(&retval, "IDAGetLastStep", 1); c_bl = IJ_Vptr(c,0,0); c_tr = IJ_Vptr(c,MX-1,MY-1); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #else printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #endif printf("\n"); } /* * PrintFinalStats: Print final run data contained in iopt. */ static void PrintFinalStats(void *ida_mem) { long int nst, nre, sli, netf, nps, npevals, nrevalsLS; int retval; retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetNumLinIters(ida_mem, &sli); check_retval(&retval, "IDAGetNumLinIters", 1); retval = IDAGetNumResEvals(ida_mem, &nre); check_retval(&retval, "IDAGetNumResEvals", 1); retval = IDAGetNumErrTestFails(ida_mem, &netf); check_retval(&retval, "IDAGetNumErrTestFails", 1); retval = IDAGetNumPrecSolves(ida_mem, &nps); check_retval(&retval, "IDAGetNumPrecSolves", 1); retval = IDAGetNumPrecEvals(ida_mem, &npevals); check_retval(&retval, "IDAGetNumPrecEvals", 1); retval = IDAGetNumLinResEvals(ida_mem, &nrevalsLS); check_retval(&retval, "IDAGetNumLinResEvals", 1); printf("-----------------------------------------------------------\n"); printf("Final run statistics: \n\n"); printf("Number of steps = %ld\n", nst); printf("Number of residual evaluations = %ld\n", nre); printf("Number of Preconditioner evaluations = %ld\n", npevals); printf("Number of linear iterations = %ld\n", sli); printf("Number of error test failures = %ld\n", netf); printf("Number of precond solve fun called = %ld\n", nps); } /* * Fweb: Rate function for the food-web problem. * This routine computes the right-hand sides of the system equations, * consisting of the diffusion term and interaction term. * The interaction term is computed by the function WebRates. */ static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata) { sunindextype jx, jy, is, idyu, idyl, idxu, idxl; realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui; /* Loop over grid points, evaluate interaction vector (length ns), form diffusion difference terms, and load crate. */ for (jy = 0; jy < MY; jy++) { yy = (webdata->dy) * jy ; idyu = (jy!=MY-1) ? NSMX : -NSMX; idyl = (jy!= 0 ) ? NSMX : -NSMX; for (jx = 0; jx < MX; jx++) { xx = (webdata->dx) * jx; idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES; idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES; cxy = IJ_Vptr(cc,jx,jy); ratesxy = IJ_Vptr(webdata->rates,jx,jy); cratexy = IJ_Vptr(crate,jx,jy); /* Get interaction vector at this grid point. */ WebRates(xx, yy, cxy, ratesxy, webdata); /* Loop over species, do differencing, load crate segment. 
*/ #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads) for (is = 0; is < NUM_SPECIES; is++) { /* Differencing in y. */ dcyli = *(cxy+is) - *(cxy - idyl + is) ; dcyui = *(cxy + idyu + is) - *(cxy+is); /* Differencing in x. */ dcxli = *(cxy+is) - *(cxy - idxl + is); dcxui = *(cxy + idxu +is) - *(cxy+is); /* Compute the crate values at (xx,yy). */ cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) + ratesxy[is]; } /* End is loop */ } /* End of jx loop */ } /* End of jy loop */ } /* * WebRates: Evaluate reaction rates at a given spatial point. * At a given (x,y), evaluate the array of ns reaction terms R. */ static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata) { int is; realtype fac; for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]); fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy); for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] ); } /* * dotprod: dot product routine for realtype arrays, for use by WebRates. */ static realtype dotprod(sunindextype size, realtype *x1, realtype *x2) { sunindextype i; realtype *xx1, *xx2, temp = ZERO; xx1 = x1; xx2 = x2; for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++); return(temp); } /* * Check function return value... * opt == 0 means SUNDIALS function allocates memory so check if * returned NULL pointer * opt == 1 means SUNDIALS function returns an integer value so check if * retval < 0 * opt == 2 means function allocates memory so check if returned * NULL pointer */ static int check_retval(void *returnvalue, char *funcname, int opt) { int *retval; if (opt == 0 && returnvalue == NULL) { /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } else if (opt == 1) { /* Check if retval < 0 */ retval = (int *) returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return(1); } } else if (opt == 2 && returnvalue == NULL) { /* Check if function returned NULL pointer - no memory allocated */ fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }
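The example above addresses a 3-D (species x mesh-x x mesh-y) field stored in a flat OpenMP N_Vector through the IJ_Vptr macro. The following self-contained sketch spells out that flattening; the SKETCH_* constants mirror NUM_SPECIES and MX from the example, and flat_index is an illustrative name, not a SUNDIALS API.

/* Sketch only: the 1-D offset reached by IJ_Vptr(vv, jx, jy) plus a species
 * index is, as used throughout the example above.  SKETCH_NUM_SPECIES and
 * SKETCH_MX mirror NUM_SPECIES and MX; flat_index is not part of SUNDIALS. */
#define SKETCH_NUM_SPECIES 2
#define SKETCH_MX          20

static long flat_index(int is, int jx, int jy)
{
    /* species varies fastest, then x, then y: is + jx*ns + jy*ns*MX */
    return (long)is
         + (long)jx * SKETCH_NUM_SPECIES
         + (long)jy * SKETCH_NUM_SPECIES * SKETCH_MX;
}

/* For instance, the predator component (is = 1) at the top-right mesh point
 * (jx = 19, jy = 19) of the 20x20 grid sits at offset 1 + 38 + 760 = 799,
 * i.e. NEQ - 1 for NEQ = 2*20*20 = 800. */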
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/iterator.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. /// class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void *operator new(size_t bytes) LLVM_NOEXCEPT { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) LLVM_NOEXCEPT { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. 
unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 2; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned BasePathSize : 32 - 6 - NumExprBits; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. 
unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) LLVM_NOEXCEPT { return mem; } void operator delete(void *, const ASTContext &, unsigned) LLVM_NOEXCEPT {} void operator delete(void *, const ASTContext *, unsigned) LLVM_NOEXCEPT {} void operator delete(void *, size_t) LLVM_NOEXCEPT {} void operator delete(void *, void *) LLVM_NOEXCEPT {} public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; protected: /// Iterator for iterating over Stmt * arrays that contain only Expr * /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). struct ExprIterator : llvm::iterator_adaptor_base<ExprIterator, Stmt **, std::random_access_iterator_tag, Expr *> { ExprIterator() : iterator_adaptor_base(nullptr) {} ExprIterator(Stmt **I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<Expr **>(I); } }; /// Const iterator for iterating over Stmt * arrays that contain only Expr * struct ConstExprIterator : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *, std::random_access_iterator_tag, const Expr *const> { ConstExprIterator() : iterator_adaptor_base(nullptr) {} ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {} reference operator*() const { assert((*I)->getStmtClass() >= firstExprConstant && (*I)->getStmtClass() <= lastExprConstant); return *reinterpret_cast<const Expr *const *>(I); } }; private: /// \brief Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt(StmtClass SC) { static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. 
We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getLocStart() const LLVM_READONLY; SourceLocation getLocEnd() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \brief Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); /// \brief Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. typedef StmtIterator child_iterator; typedef ConstStmtIterator const_child_iterator; typedef llvm::iterator_range<child_iterator> child_range; typedef llvm::iterator_range<const_child_iterator> const_child_range; child_range children(); const_child_range children() const { auto Children = const_cast<Stmt *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_iterator child_begin() { return children().begin(); } child_iterator child_end() { return children().end(); } const_child_iterator child_begin() const { return children().begin(); } const_child_iterator child_end() const { return children().end(); } /// \brief Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. /// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. For example, CompoundStmt mixes statements, expressions /// and declarations (variables, types). Another example is ForStmt, where /// the first statement can be an expression or a declaration. 
/// class DeclStmt : public Stmt { DeclGroupRef DG; SourceLocation StartLoc, EndLoc; public: DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {} /// \brief Build an empty declaration statement. explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { } /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } SourceLocation getStartLoc() const { return StartLoc; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } typedef DeclGroupRef::iterator decl_iterator; typedef DeclGroupRef::const_iterator const_decl_iterator; typedef llvm::iterator_range<decl_iterator> decl_range; typedef llvm::iterator_range<const_decl_iterator> decl_const_range; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } friend class ASTStmtReader; friend class ASTStmtWriter; }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. 
/// class CompoundStmt : public Stmt { Stmt** Body; SourceLocation LBraceLoc, RBraceLoc; friend class ASTStmtReader; public: CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts, SourceLocation LB, SourceLocation RB); // \brief Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; } // \brief Build an empty compound statement. explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty), Body(nullptr) { CompoundStmtBits.NumStmts = 0; } void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } typedef Stmt** body_iterator; typedef llvm::iterator_range<body_iterator> body_range; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return Body; } body_iterator body_end() { return Body + size(); } Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; } Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); Body[size()-1] = S; } typedef Stmt* const * const_body_iterator; typedef llvm::iterator_range<const_body_iterator> body_const_range; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return Body; } const_body_iterator body_end() const { return Body + size(); } const Stmt *body_front() const { return !body_empty() ? Body[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? Body[size() - 1] : nullptr; } typedef std::reverse_iterator<body_iterator> reverse_body_iterator; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } typedef std::reverse_iterator<const_body_iterator> const_reverse_body_iterator; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; } SourceLocation getLBracLoc() const { return LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(Body, Body + CompoundStmtBits.NumStmts); } const_child_range children() const { return const_child_range(child_iterator(Body), child_iterator(Body + CompoundStmtBits.NumStmts)); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: // A pointer to the following CaseStmt or DefaultStmt class, // used by SwitchStmt. 
SwitchCase *NextSwitchCase; SourceLocation KeywordLoc; SourceLocation ColonLoc; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) { } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return KeywordLoc; } void setKeywordLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase*>(this)->getSubStmt(); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; class CaseStmt : public SwitchCase { SourceLocation EllipsisLoc; enum { LHS, RHS, SUBSTMT, END_EXPR }; Stmt* SubExprs[END_EXPR]; // The expression for the RHS is Non-null for // GNU "case 1 ... 4" extension public: CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { SubExprs[SUBSTMT] = nullptr; SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs); SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs); EllipsisLoc = ellipsisLoc; } /// \brief Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { } SourceLocation getCaseLoc() const { return KeywordLoc; } void setCaseLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getEllipsisLoc() const { return EllipsisLoc; } void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); } Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); } Stmt *getSubStmt() { return SubExprs[SUBSTMT]; } const Expr *getLHS() const { return reinterpret_cast<const Expr*>(SubExprs[LHS]); } const Expr *getRHS() const { return reinterpret_cast<const Expr*>(SubExprs[RHS]); } const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; } void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; } void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); } void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[END_EXPR]); } }; class DefaultStmt : public SwitchCase { Stmt* SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// \brief Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) { } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return KeywordLoc; } void setDefaultLoc(SourceLocation L) { KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt+1); } }; inline SourceLocation SwitchCase::getLocEnd() const { if (const CaseStmt *CS = dyn_cast<CaseStmt>(this)) return CS->getLocEnd(); return cast<DefaultStmt>(this)->getLocEnd(); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; /// class LabelStmt : public Stmt { SourceLocation IdentLoc; LabelDecl *TheDecl; Stmt *SubStmt; public: LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) { static_assert(sizeof(LabelStmt) == 2 * sizeof(SourceLocation) + 2 * sizeof(void *), "LabelStmt too big"); } // \brief Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { } SourceLocation getIdentLoc() const { return IdentLoc; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setIdentLoc(SourceLocation L) { IdentLoc = L; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// \brief Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... } /// class AttributedStmt : public Stmt { Stmt *SubStmt; SourceLocation AttrLoc; unsigned NumAttrs; friend class ASTStmtReader; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc), NumAttrs(Attrs.size()) { memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *)); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) { memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *)); } Attr *const *getAttrArrayPtr() const { return reinterpret_cast<Attr *const *>(this + 1); } Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); // \brief Build an empty attributed statement. 
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttrLoc; } ArrayRef<const Attr*> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. /// class IfStmt : public Stmt { enum { VAR, COND, THEN, ELSE, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation IfLoc; SourceLocation ElseLoc; public: IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond, Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = nullptr); /// \brief Build an empty if/then/else statement explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } const Stmt *getThen() const { return SubExprs[THEN]; } void setThen(Stmt *S) { SubExprs[THEN] = S; } const Stmt *getElse() const { return SubExprs[ELSE]; } void setElse(Stmt *S) { SubExprs[ELSE] = S; } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Stmt *getThen() { return SubExprs[THEN]; } Stmt *getElse() { return SubExprs[ELSE]; } SourceLocation getIfLoc() const { return IfLoc; } void setIfLoc(SourceLocation L) { IfLoc = L; } SourceLocation getElseLoc() const { return ElseLoc; } void setElseLoc(SourceLocation L) { ElseLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; } SourceLocation getLocEnd() const LLVM_READONLY { if (SubExprs[ELSE]) return SubExprs[ELSE]->getLocEnd(); else return SubExprs[THEN]->getLocEnd(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. /// class SwitchStmt : public Stmt { SourceLocation SwitchLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // This points to a linked list of case and default statements and, if the // SwitchStmt is a switch on an enum value, records whether all the enum // values were covered by CaseStmts. The coverage information value is meant // to be a hint for possible clients. llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase; public: SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond); /// \brief Build a empty switch statement. 
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Stmt *getBody() const { return SubExprs[BODY]; } const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); } Stmt *getBody() { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); } /// \brief Set the case list for this switch statement. void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); } SourceLocation getSwitchLoc() const { return SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { SubExprs[BODY] = S; SwitchLoc = SL; } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase.getPointer()); FirstCase.setPointer(SC); } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { FirstCase.setInt(true); } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return FirstCase.getInt(); } SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd() : SubExprs[COND]->getLocEnd(); } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. /// class WhileStmt : public Stmt { SourceLocation WhileLoc; enum { VAR, COND, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; public: WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, SourceLocation WL); /// \brief Build an empty while statement. explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { } /// \brief Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[VAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// DoStmt - This represents a 'do/while' stmt. /// class DoStmt : public Stmt { SourceLocation DoLoc; enum { BODY, COND, END_EXPR }; Stmt* SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) { SubExprs[COND] = reinterpret_cast<Stmt*>(cond); SubExprs[BODY] = body; } /// \brief Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getDoLoc() const { return DoLoc; } void setDoLoc(SourceLocation L) { DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. /// class ForStmt : public Stmt { SourceLocation ForLoc; enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// \brief Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { } Stmt *getInit() { return SubExprs[INIT]; } /// \brief Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. /// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. 
explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. /// class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// \brief Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { } SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// BreakStmt - This represents a break. /// class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) { static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation), "BreakStmt too large"); } /// \brief Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { } SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. /// class ReturnStmt : public Stmt { SourceLocation RetLoc; Stmt *RetExpr; const VarDecl *NRVOCandidate; public: explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E), NRVOCandidate(NRVOCandidate) {} /// \brief Build an empty return expression. 
explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { } const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// \brief Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(child_iterator(), child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. 
StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm::iterator_range<inputs_iterator> inputs_range; typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm::iterator_range<outputs_iterator> outputs_range; typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) { } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. /// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit 
SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. /// class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// \brief Build an empty __leave statement. 
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { } SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// \brief This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// \brief The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_VLAType, }; /// \brief Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr) : VarAndKind(Var, Kind), Loc(Loc) { switch (Kind) { case VCK_This: assert(!Var && "'this' capture cannot have a variable!"); break; case VCK_ByRef: assert(Var && "capturing by reference must have a variable!"); break; case VCK_VLAType: assert(!Var && "Variable-length array type capture cannot have a variable!"); break; } } /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable. bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// \brief Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const { assert(capturesVariable() && "No variable available for 'this' or VAT capture"); return VarAndKind.getPointer(); } friend class ASTStmtReader; }; private: /// \brief The number of variable captured, including 'this'. unsigned NumCaptures; /// \brief The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. 
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return CapDeclAndKind.getPointer(); } /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; /// \brief Const iterator that walks over the capture initialization /// arguments. 
typedef Expr *const *const_capture_init_iterator; typedef llvm::iterator_range<const_capture_init_iterator> const_capture_init_range; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
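// Illustrative sketch, not part of the header above: a minimal walk over the
// statement classes declared here, assuming a clang::Stmt* root obtained from a
// parsed AST elsewhere. The helper name is ours; only documented accessors
// (IfStmt::getElse, Stmt::child_begin/child_end) are used.
#include "clang/AST/Stmt.h"
#include "llvm/Support/Casting.h"

// Recursively count IfStmt nodes that carry an else branch.
static unsigned countIfWithElse(clang::Stmt *S) {
  if (!S)
    return 0;
  unsigned N = 0;
  if (auto *If = llvm::dyn_cast<clang::IfStmt>(S))
    if (If->getElse())                     // null when the source had no else
      ++N;
  for (clang::Stmt::child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    N += countIfWithElse(*I);              // children may include null substatements
  return N;
}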
nestedpar.c
#include <omp.h>
#include <stdio.h>

void paroutput(char* s)
{
  #pragma omp parallel
  printf("%s\n", s);
}

int main(void)
{
  #pragma omp parallel
  {
    paroutput("before single");
    #pragma omp single
    {
      paroutput("inside single");
    }
    paroutput("after single");
  }
}
main.c
/* MIT License Copyright (c) 2019 Novak Kaluđerović Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include <math.h> #include <omp.h> #include "gmp.h" #include <time.h> #include <inttypes.h> #include <string.h> #include "common.h" #include <unistd.h> /* Static parameters (should be read from a project file or input) */ static char* folder; static uint64_t const_N; static uint64_t const_k_N; static int bits_N = 24; static int seed = 0; static int num_threads = 6; static int read_data = 1; static void usage() { fprintf(stderr, "precomp -b bname [ options ]\n"); fprintf(stderr, " -b bname: project file name\n"); fprintf(stderr, " -n bits: number of bits of N, default: 24\n"); fprintf(stderr, " -s seed: randomness seed, default: time(NULL)\n"); fprintf(stderr, " -t num_threads: number of threads. default: 6\n"); fprintf(stderr, " -r read_data: read precomputed data (1) or create it (0). 
default: 1\n"); fprintf(stderr, " -h help\n"); exit(1); } static void get_options(int argc, char **argv) { char c; folder = NULL; while ((c = getopt(argc, argv, "b:n:s:t:r:h")) != (char)(-1)) { switch (c) { case 'b': folder = optarg; break; case 'n': if (sscanf(optarg, "%d", &bits_N) != 1) complain("Bad argument to -n!\n"); break; case 's': if (sscanf(optarg, "%d", &seed) != 1) complain("Bad argument to -s!\n"); break; case 't': if (sscanf(optarg, "%d", &num_threads) != 1) complain("Bad argument to -t!\n"); break; case 'r': if (sscanf(optarg, "%d", &read_data) != 1) complain("Bad argument to -r!\n"); break; case 'h': usage(); break; default: fprintf(stderr, "Bad option %c\n", (char)c); usage(); } } if (folder == NULL) { fprintf(stderr, "argument -b bname is necessary\n"); usage(); } } static inline uint64_t generate_sequence(mpz_t R, mpz_t p, uint16_t seq_len) { assert(seq_len <= 64); mpz_t tmp; mpz_init_set(tmp, R); uint64_t seq = 0; for (uint16_t j = 0; j < seq_len; j++) { unsigned char bit = (1 - mpz_legendre(tmp, p)) >> 1; seq |= ((uint64_t)bit << j); mpz_add_ui(tmp,tmp,1); } mpz_clear(tmp); return seq; } static inline uint64_t static_get_position(uint64_t i, position_t position) { int64_t appx_pos = floor( (((double)CONST_A)/((double)(1ULL << ADDRESS_NUM_BITS))) * ((int64_t)i) ); int64_t pos = (int64_t)position; while (1) { if(labs(appx_pos - pos) < (1ULL << (ADDRESS_NUM_BITS - 1))) return ((uint64_t)pos); pos += (1ULL << ADDRESS_NUM_BITS); } } static inline int binary_search_t(sequence_t *seqs, sequence_t val, uint16_t seqs_len) { int32_t left, right, idx; idx = seqs_len >> 1; left = 0; right = seqs_len - 1; while (left <= right) { if (seqs[idx] < val) left = idx + 1; else if (seqs[idx] == val) return 0; else right = idx - 1; idx = (left + right) >> 1; } return -1; } static inline int binary_search64(sequence_J *seq_J, uint64_t val, uint64_t seqs_len, uint32_t *dd, uint32_t *ii) { int32_t left, right, idx; idx = seqs_len >> 1; left = 0; right = seqs_len - 1; while (left <= right) { if (seq_J[idx].seq < val) left = idx + 1; else if (seq_J[idx].seq == val) { *ii = seq_J[idx].i; *dd = seq_J[idx].d; return 0; } else right = idx - 1; idx = (left + right) >> 1; } return -1; } static inline uint64_t static_get_sequence_from_id(unsigned char *k_symbols, uint32_t i, uint32_t d, uint16_t seq_len, unsigned char negate_result) { assert(seq_len <= 64); uint64_t seq = 0; for (int j = 0; j < seq_len; j++) { uint32_t idx = i + j*d; uint32_t byte_idx = idx >> 3; uint32_t bit_idx = 7 - (idx & 0x7); unsigned char bit = (k_symbols[byte_idx] >> bit_idx) & 1; seq |= ((uint64_t)bit << j); } if (negate_result) seq ^= MASK(seq_len, 0); return seq; } static inline void static_get_two_sequences_from_id(unsigned char *J_list, uint32_t i, uint32_t d, uint16_t seq_len, unsigned char negate_result, uint64_t *seq, uint64_t *seq_minus, unsigned char symbol_minus1) { assert(seq_len <= 64); for (int j = 0; j < seq_len; j++) { uint64_t idx = i + j*d; uint64_t byte_idx = idx >> 3; uint64_t bit_idx = 7 - (idx & 0x7); unsigned char bit = (J_list[byte_idx] >> bit_idx) & 1; *seq |= ((uint64_t)bit << j); *seq_minus |= ((uint64_t)bit << (seq_len - 1 - j)); } if (negate_result) *seq ^= MASK(seq_len, 0); if (symbol_minus1^negate_result) *seq_minus ^= MASK(seq_len, 0); } static inline unsigned char bitmap_hit(unsigned char *bitmap, uint64_t seq) { uint64_t bmp_pos = seq & MASK(BITMAP_NUM_BITS,0); uint64_t pos_byt = bmp_pos >> 3; unsigned char pos_bit = bmp_pos & 0x7; unsigned char hit = (bitmap[pos_byt] >> pos_bit) & 1; return 
hit; } static inline int is_the_key_correct(mpz_t key, mpz_t p, uint64_t *tests) { mpz_t tmp; mpz_init_set(tmp, key); mpz_add_ui(tmp, tmp, (NUM_TESTS - 1) * TEST_BIT_CHUNKS); for (int i = NUM_TESTS - 1; i >= 0; i--) { uint64_t seq = generate_sequence(tmp, p, TEST_BIT_CHUNKS); if (seq != tests[i]) return 0; mpz_sub_ui(tmp, tmp, TEST_BIT_CHUNKS); } mpz_clear(tmp); return 1; } int main(int argc, char* argv[]) { char ppath[200], stringspath[200], occurrencespath[200], positionspath[200], bitmappath[200], allseqspath[200], jspath[200]; struct timespec start, end, parallel_end, parallel_start; get_options(argc, argv); //Get options and set initial parameters omp_set_num_threads(num_threads); //number of threads const_N = (1ULL << bits_N); //length of J_list const_k_N = (uint64_t)floor((double)(const_N - 1)/((double)(CONST_L - 1))); if(seed==0) seed=time(NULL); sprintf(ppath, "%s/p", folder); sprintf(stringspath, "%s/%s.bin", folder, folder); sprintf(positionspath, "%s/positionsL%dA%d", folder, CONST_L, ADDRESS_NUM_BITS); sprintf(bitmappath, "%s/bitmapL%dB%d", folder, CONST_L, BITMAP_NUM_BITS); sprintf(allseqspath, "%s/allseqsL%dA%d", folder, CONST_L, ADDRESS_NUM_BITS); //DEFINE VARIABLES mpz_t p_global; unsigned char *k_symbols = (unsigned char*) malloc(CONST_M_NUM_BYTES * sizeof(unsigned char)); position_t *positions = (position_t*) malloc((OCC_LEN + 1) * sizeof(position_t)); unsigned char* bitmap = (unsigned char*) malloc(BITMAP_NUM_BYTES); sequence_t *precomp_seqs = (sequence_t*) malloc(CONST_A * sizeof(sequence_t)); sequence_J *J_hits = (sequence_J*) calloc(MAX_J_HITS, sizeof(sequence_J)); // READ INPUT DATA AND MAKE AUXILIARY DATA (common.c) readprime(ppath, p_global); readsymbols(stringspath, k_symbols); if(read_data) { readpositions(positionspath, positions); readbitmap(bitmappath, bitmap); readallseqs(allseqspath, precomp_seqs); } else { makepositions(k_symbols, positions, p_global); make_allseqs_and_bitmap(precomp_seqs, k_symbols, bitmap, positions, p_global); sortallseqs(precomp_seqs, positions, num_threads); } // Start randomness srand(seed); gmp_randstate_t main_rstate; gmp_randinit_default(main_rstate); gmp_randseed_ui(main_rstate, rand()); // WORK int stop = 0; int num_js = 0; uint64_t tot_trials = 0; uint64_t tot_hits = 0; clock_gettime(CLOCK_MONOTONIC_RAW, &start); while (!stop) { printf("\nStarting the J sequence...\n"); mpz_t rand_j; mpz_init(rand_j); mpz_urandomm(rand_j, main_rstate, p_global); mpz_set_str(rand_j, "4533551310507856472906002", 10); gmp_printf("J = %Zd\n", rand_j); ++num_js; uint64_t num_trials = 0; uint64_t num_hits = 0; uint32_t curr_d = 0; // COMPUTE LIST OF SYMBOLS [J, J+N-1] unsigned char *J_list_g = (unsigned char*) malloc((const_N + const_k_N ) >> 3); generate_long_list(J_list_g, rand_j, p_global, const_N + const_k_N); clock_gettime(CLOCK_MONOTONIC_RAW, &parallel_start); #pragma omp parallel shared(curr_d, stop, num_hits) reduction(+: num_trials) { mpz_t p, J, tmp; mpz_init_set(p, p_global); mpz_init_set(J, rand_j); mpz_init(tmp); uint32_t num_ds; uint32_t d, d_index; uint64_t seq_J, hit_index; // TIMING AND COUNTING int my_id = omp_get_thread_num(); struct timespec thread_start, thread_end; uint64_t thread_trials = 0; // VARIABLES unsigned char symb_minus1; uint32_t *list_of_ds; // FUNCTIONS (common.h) symbolofmin1(p, &symb_minus1); denominators(const_N, &list_of_ds, &num_ds); // COMPUTE LIST OF SYMBOLS [J, J+N-1] unsigned char *J_list = (unsigned char*) malloc((const_N + const_k_N) >> 3); memcpy(J_list, J_list_g, (const_N + const_k_N)>>3); int stoppriv 
= 0; clock_gettime(CLOCK_MONOTONIC_RAW, &thread_start); while (!stoppriv) { #pragma omp atomic capture d_index = curr_d++; if (d_index >= num_ds) break; else d = list_of_ds[d_index]; // Compute the Legendre symbol of d mpz_set_ui(tmp, d); unsigned char d_symbol = (1 - mpz_legendre(tmp, p)) >> 1; // Generate the sequences and check for collisions for (uint32_t i = 0; i < d; i++) { #pragma omp atomic read stoppriv = stop; if (stoppriv) break; seq_J = static_get_sequence_from_id(J_list, i, d, CONST_L, d_symbol); if(bitmap_hit(bitmap, seq_J)) { uint64_t addr = seq_J & MASK(ADDRESS_NUM_BITS, 0); sequence_t val = (sequence_t)((seq_J >> ADDRESS_NUM_BITS) & MASK(CONST_L - ADDRESS_NUM_BITS, 0)); uint64_t pos_addrplus1 = static_get_position(addr + 1, positions[addr + 1]); uint64_t pos_addr = static_get_position(addr, positions[addr]); if(binary_search_t(&precomp_seqs[pos_addr], val, pos_addrplus1 - pos_addr) == 0) { #pragma omp atomic capture hit_index = num_hits++; if(hit_index >= MAX_J_HITS) stop = 1; else { J_hits[hit_index].seq = seq_J; J_hits[hit_index].d = d; J_hits[hit_index].i = i; } } } thread_trials += 1; } } #pragma omp atomic update num_trials += thread_trials; clock_gettime(CLOCK_MONOTONIC_RAW, &thread_end); //printf("Thread %d finished %ld trials in %lf seconds\n", my_id, thread_trials, delta(&thread_end, &thread_start)); mpz_clear(tmp); mpz_clear(J); mpz_clear(p); free(J_list); free(list_of_ds); } clock_gettime(CLOCK_MONOTONIC_RAW, &parallel_end); if (num_hits == MAX_J_HITS) num_hits--; printf("J done!\n"); printf("Number of trials = %" PRIu64 "\n", num_trials); printf("Number of hits is %" PRIu64 "\n", num_hits); printf("Finished the trials in %lf seconds\n", delta(&parallel_end, &parallel_start)); printf("Now testing if hits are good...\n"); tot_trials += num_trials; tot_hits += num_hits; gmp_sprintf(jspath, "%s/Jlist%Zd", folder, rand_j); // Sort J_hits qsort(J_hits, num_hits, sizeof(sequence_J), compareJ_seq); // Write rand_j and J_hits to a file FILE *fp = fopen(jspath, "wb"); assert((fp != NULL) && "Error opening the J_hits output file"); fwrite((sequence_J *)J_hits, num_hits * sizeof(sequence_J), 1, fp); fclose(fp); stop = 0; curr_d = 0; clock_gettime(CLOCK_MONOTONIC_RAW, &parallel_start); if(num_hits) #pragma omp parallel shared(curr_d, stop, k_symbols, num_hits) { uint32_t ii, d, dd; mpz_t k, J, dinv, p, tmp; mpz_inits(k, dinv, tmp, NULL); mpz_init_set(J, rand_j); mpz_init_set(p, p_global); // TIMING AND COUNTING int my_id = omp_get_thread_num(); struct timespec thread_start, thread_end; uint64_t thread_trials = 0; // VARIABLES unsigned char symbol_minus1; uint64_t tests[NUM_TESTS]; // FUNCTIONS (common.h) symbolofmin1(p, &symbol_minus1); maketests(k_symbols, tests); int stoppriv = 0; clock_gettime(CLOCK_MONOTONIC_RAW, &thread_start); while(!stoppriv) { #pragma omp atomic capture d = curr_d++; if (d > CONST_k) break; mpz_set_ui(tmp, d); unsigned char d_symbol = (1 - mpz_legendre(tmp, p)) >> 1; for(uint32_t j = 0; j < d; j++) { #pragma omp atomic read stoppriv = stop; if (stoppriv) break; uint64_t seq = 0; uint64_t seq_minus = 0; for (uint32_t i = j; i < CONST_M-(CONST_L-1)*d; i+=d) { if (i == j) static_get_two_sequences_from_id(k_symbols, i, d, CONST_L, d_symbol, &seq, &seq_minus, symbol_minus1); else { uint32_t idx = i + (CONST_L - 1)*d; uint32_t byte_idx = idx >> 3; uint32_t bit_idx = 7 - (idx & 0x7); unsigned char bit = (k_symbols[byte_idx] >> bit_idx) & 1; bit ^= d_symbol; seq = (seq >> 1) | ((uint64_t)bit << (CONST_L - 1)); seq = seq & MASK(CONST_L, 0); bit ^= 
symbol_minus1; seq_minus = (seq_minus << 1) | ((uint64_t)bit); seq_minus = seq_minus & MASK(CONST_L, 0); } if(binary_search64(J_hits, seq, num_hits, &dd, &ii) == 0) { mpz_set_ui(dinv, dd); mpz_invert(dinv, dinv, p); mpz_set(k,J); mpz_add_ui(k,k,ii); mpz_mul(k,k,dinv); mpz_mul_ui(k,k,d); mpz_sub_ui(k,k,i); mpz_mod(k,k,p); if (is_the_key_correct(k, p, tests)) { stop = 1; gmp_printf("k = %Zd\n", k); break; } } if(binary_search64(J_hits, seq_minus, num_hits, &dd, &ii) == 0) { mpz_set_ui(dinv, dd); mpz_invert(dinv, dinv, p); mpz_set(k,J); mpz_add_ui(k,k,ii); mpz_mul(k,k,dinv); mpz_mul_ui(k,k,d); mpz_mul_si(k,k,-1); mpz_sub_ui(k,k,(CONST_L-1)*(uint64_t)d); mpz_sub_ui(k,k,i); mpz_mod(k,k,p); if (is_the_key_correct(k, p, tests)) { stop = 1; gmp_printf("k = %Zd\n", k); break; } } } } } clock_gettime(CLOCK_MONOTONIC_RAW, &thread_end); //printf("Thread %d finished testing hits in %lf seconds\n", my_id, delta(&thread_end, &thread_start)); mpz_clears(tmp, J, p, k, dinv, NULL); } clock_gettime(CLOCK_MONOTONIC_RAW, &parallel_end); printf("Finished testing hits in %lf seconds\n", delta(&parallel_end, &parallel_start)); } // END WHILE J LOOP clock_gettime(CLOCK_MONOTONIC_RAW, &end); printf("\n\nTime taken to solve is %lf seconds\n", delta(&end, &start)); printf("Total number of trials is %" PRIu64 "\n", tot_trials); printf("Total number of hits is %" PRIu64 "\n", tot_hits); printf("Total number of Js is %d\n", num_js); free(k_symbols); free(positions); free(precomp_seqs); free(bitmap); return 0; }
omp_copyin.c
#define _XOPEN_SOURCE 600
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <omp.h>

#define ITER_LOOP 5

struct tls_data {
  int idx;
  char str[32];
} tls;
#pragma omp threadprivate(tls)

int main()
{
  int i;

  printf("[Master] tls(%s) (%p)\n", tls.str, tls.str);

  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #pragma omp task
      {
        tls.idx = omp_get_thread_num();
        strcpy(tls.str, "12345");
        for (i = 0; i < ITER_LOOP; i++) {
          printf("[Thread:%d] (i=%d) %s (%p)\n", tls.idx, i, tls.str, tls.str);
          sleep(1);
        }
      }
      #pragma omp task
      {
        tls.idx = omp_get_thread_num();
        strcpy(tls.str, "67890");
        for (i = 0; i < ITER_LOOP; i++) {
          printf("[Thread:%d] (i=%d) %s (%p)\n", tls.idx, i, tls.str, tls.str);
          sleep(1);
        }
      }
      #pragma omp task
      {
        tls.idx = omp_get_thread_num();
        strcpy(tls.str, "+-*/%");
        for (i = 0; i < ITER_LOOP; i++) {
          printf("[Thread:%d] (i=%d) %s (%p)\n", tls.idx, i, tls.str, tls.str);
          sleep(1);
        }
      }
    }
  }

  printf("[Master] tls(%s) (%p)\n", tls.str, tls.str);
  return 0;
}
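/* Illustrative sketch, not part of omp_copyin.c above, which declares tls
 * threadprivate but never uses the copyin clause its name refers to. With
 * copyin(tls), every thread's threadprivate copy is initialized from the
 * master thread's copy on entry to the parallel region. */
#include <stdio.h>
#include <string.h>
#include <omp.h>

struct tls_data { int idx; char str[32]; } tls;
#pragma omp threadprivate(tls)

int main(void)
{
  tls.idx = -1;
  strcpy(tls.str, "master");

  #pragma omp parallel copyin(tls)
  {
    /* every thread starts from idx == -1 and str == "master" */
    printf("[Thread:%d] inherited idx=%d str=%s\n",
           omp_get_thread_num(), tls.idx, tls.str);
  }
  return 0;
}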
DRB047-doallchar-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> /* One dimension array computation with finer granularity than traditional 4 bytes. Dynamic tools monitoring 4-bytes elements may wrongfuly report race condition. */ char a[100]; int main() { int i; #pragma omp parallel for private(i) for (i=0;i<100;i++) a[i]=i; #pragma omp parallel for private(i) for (i=0;i<100;i++) a[i]=a[i]+1; for (i=0;i<100;i++) printf("%c\n",a[i]); return 0; }
Example_udr.3.c
/*
 * @@name:       udr.3.c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   yes
 * @@expect:     success
 * @@version:    omp_4.0
 */
#include <stdio.h>
#define N 100

struct mx_s {
   float value;
   int index;
};

/* prototype functions for combiner and initializer in the declare reduction */
void mx_combine(struct mx_s *out, struct mx_s *in);
void mx_init(struct mx_s *priv, struct mx_s *orig);

#pragma omp declare reduction(maxloc: struct mx_s: \
        mx_combine(&omp_out, &omp_in)) \
        initializer(mx_init(&omp_priv, &omp_orig))

void mx_combine(struct mx_s *out, struct mx_s *in)
{
   if ( out->value < in->value ) {
      out->value = in->value;
      out->index = in->index;
   }
}

void mx_init(struct mx_s *priv, struct mx_s *orig)
{
   priv->value = orig->value;
   priv->index = orig->index;
}

int main(void)
{
   struct mx_s mx;
   float val[N], d;
   int i, count = N;

   for (i = 0; i < count; i++) {
      d = (N*0.8f - i);
      val[i] = N * N - d * d;
   }
   mx.value = val[0];
   mx.index = 0;

   #pragma omp parallel for reduction(maxloc: mx)
   for (i = 1; i < count; i++) {
      if (mx.value < val[i]) {
         mx.value = val[i];
         mx.index = i;
      }
   }

   printf("max value = %g, index = %d\n", mx.value, mx.index);
   /* prints 10000, 80 */
   return 0;
}
atomic.c
#include <omp.h>

int main(void)
{
  int a = 0;  /* initialize so the atomic read-modify-write starts from a defined value */

  #pragma omp parallel
  {
    #pragma omp atomic
    a += 1;
  }
  return 0;
}
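/* Illustrative sketch, not part of atomic.c above: the same per-thread
 * increment written with a reduction clause. Each thread updates a private
 * copy of a, and the copies are combined when the region ends, so the result
 * is one increment per thread without a contended atomic update. */
#include <stdio.h>
#include <omp.h>

int main(void)
{
  int a = 0;

  #pragma omp parallel reduction(+:a)
  a += 1;

  printf("a = %d\n", a);   /* equals the number of threads in the team */
  return 0;
}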
GB_unop__asinh_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__asinh_fp32_fp32) // op(A') function: GB (_unop_tran__asinh_fp32_fp32) // C type: float // A type: float // cast: float cij = aij // unaryop: cij = asinhf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = asinhf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = asinhf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ASINH || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__asinh_fp32_fp32) ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = asinhf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = asinhf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__asinh_fp32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
lastprivateOrphaned.c
//#include <omp.h>

int j = 0;

void foo(int n)
{
  int i;
  #pragma omp for lastprivate(j)
  for (i = 1; i < n; i++) {
    j = j + 1;
  }
}
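/* Illustrative sketch, not part of lastprivateOrphaned.c above. The omp for in
 * foo() is "orphaned": it only divides work when foo() is called from inside a
 * parallel region, as in the first block below. The second loop is a
 * self-contained variant in which the lastprivate result is well defined,
 * because last is assigned (not read) in every iteration, so after the loop it
 * holds the value from the sequentially last iteration, n - 1. */
#include <stdio.h>
#include <omp.h>

void foo(int n);                 /* defined in the file above */

int main(void)
{
  int n = 100;

  #pragma omp parallel
  foo(n);                        /* the orphaned omp for binds to this region */

  int last = -1;
  #pragma omp parallel for lastprivate(last)
  for (int i = 0; i < n; i++)
    last = i;

  printf("last = %d\n", last);   /* prints 99 */
  return 0;
}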
calculate_E_field_flat_all_in_one-zachsversion.h
/* int k_delta[3][3] = {{1,0,0}, {0,1,0}, {0,0,1}}; */ /* Calculate the electric flux on both faces in the input direction. */ /* TO CALL THIS FROM MAIN DRIVER: for(int count = 1;count <= 2;count++) { int Ai = (flux_dirn+count)%3; // flux_dirn=1, count=1; Ai = 2: A2_rhs += - 0.25*[+FyBx(jp)+FyBx(jm)] : SIGN=-1 // flux_dirn=1, count=2; Ai = 0 // flux_dirn=0, count=1; Ai = 1 // flux_dirn=0, count=2; Ai = 2: A2_rhs += - 0.25*[-FxBy(ip)-FxBy(im)] : SIGN=+1 // flux_dirn=2, count=1; Ai = 0 // flux_dirn=2, count=2; Ai = 1 REAL SIGN=1.0; if(Ai == 0 && flux_dirn == 2) SIGN=-1.0; if(Ai == 1 && flux_dirn == 0) SIGN=-1.0; if(Ai == 2 && flux_dirn == 1) SIGN=-1.0; //printf("hey flux_dirn=%d ; Ai=%d ; SIGN=%e\n",flux_dirn,Ai,SIGN); if(SIGN==1.0) { calculate_E_field_flat_all_in_one_zachsversion(params, &auxevol_gfs[IDX4ptS(VALENCIAV_RU0GF+(Ai+1)%3, 0)],&auxevol_gfs[IDX4ptS(VALENCIAV_RU0GF+(Ai+2)%3, 0)], &auxevol_gfs[IDX4ptS(VALENCIAV_LU0GF+(Ai+1)%3, 0)],&auxevol_gfs[IDX4ptS(VALENCIAV_LU0GF+(Ai+2)%3, 0)], &auxevol_gfs[IDX4ptS(B_RU0GF +(Ai+1)%3, 0)],&auxevol_gfs[IDX4ptS(B_RU0GF +(Ai+2)%3,0)],&auxevol_gfs[IDX4ptS(B_RU0GF+Ai, 0)], &auxevol_gfs[IDX4ptS(B_LU0GF +(Ai+1)%3, 0)],&auxevol_gfs[IDX4ptS(B_LU0GF +(Ai+2)%3,0)],&auxevol_gfs[IDX4ptS(B_LU0GF+Ai, 0)], &rhs_gfs[IDX4ptS(AD0GF+Ai,0)], SIGN, flux_dirn); } else { calculate_E_field_flat_all_in_one_zachsversion(params, &auxevol_gfs[IDX4ptS(VALENCIAV_RU0GF+(Ai+2)%3, 0)],&auxevol_gfs[IDX4ptS(VALENCIAV_RU0GF+(Ai+1)%3, 0)], &auxevol_gfs[IDX4ptS(VALENCIAV_LU0GF+(Ai+2)%3, 0)],&auxevol_gfs[IDX4ptS(VALENCIAV_LU0GF+(Ai+1)%3, 0)], &auxevol_gfs[IDX4ptS(B_RU0GF +(Ai+2)%3, 0)],&auxevol_gfs[IDX4ptS(B_RU0GF +(Ai+1)%3, 0)],&auxevol_gfs[IDX4ptS(B_RU0GF+Ai, 0)], &auxevol_gfs[IDX4ptS(B_LU0GF +(Ai+2)%3, 0)],&auxevol_gfs[IDX4ptS(B_LU0GF +(Ai+1)%3, 0)],&auxevol_gfs[IDX4ptS(B_LU0GF+Ai, 0)], &rhs_gfs[IDX4ptS(AD0GF+Ai,0)], SIGN, flux_dirn); } } */ void calculate_E_field_flat_all_in_one_zachsversion(const paramstruct *params, const REAL *Vr0,const REAL *Vr1, const REAL *Vl0,const REAL *Vl1, const REAL *Br0,const REAL *Br1,const REAL *Br2, const REAL *Bl0,const REAL *Bl1,const REAL *Bl2, REAL *Az_rhs,const REAL SIGN,const int flux_dirn) { #include "GiRaFFE_standalone_Ccodes/set_Cparameters.h" #pragma omp parallel for for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++) { for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++) { for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0++) { // First, we set the index from which we will read memory. indexp1 is incremented by // one point in the direction of reconstruction. These correspond to the faces at at // i-1/2 and i+1/2 int index = IDX3S(i0,i1,i2); int indexp1 = IDX3S(i0+k_delta[flux_dirn][0],i1+k_delta[flux_dirn][1],i2+k_delta[flux_dirn][2]); // Now, we read in memory. We need all components of velocity and magnetic field on both // the left and right sides of the interface at *both* faces. 
                const double Valenciav_rU0 = Vr0[index];
                const double Valenciav_rU1 = Vr1[index];
                const double B_rU0 = Br0[index];
                const double B_rU1 = Br1[index];
                const double B_rU2 = Br2[index];

                const double Valenciav_lU0 = Vl0[index];
                const double Valenciav_lU1 = Vl1[index];
                const double B_lU0 = Bl0[index];
                const double B_lU1 = Bl1[index];
                const double B_lU2 = Bl2[index];

                // Calculate the flux vector on each face for each component of the E-field:
                const REAL F0B1_r = (Valenciav_rU0*B_rU1 - Valenciav_rU1*B_rU0);
                const REAL F0B1_l = (Valenciav_lU0*B_lU1 - Valenciav_lU1*B_lU0);

                // Compute the state vector for this flux direction
                const REAL U_r = B_rU2;
                const REAL U_l = B_lU2;

                // Basic HLLE solver:
                const REAL FHLL_0B1 = 0.5*(F0B1_r + F0B1_l - (U_r-U_l));

                // Repeat at i+1
                // Now, we read in memory. We need all components of velocity and magnetic field on both
                // the left and right sides of the interface at *both* faces.
                const double Valenciav_rU0_p1 = Vr0[indexp1];
                const double Valenciav_rU1_p1 = Vr1[indexp1];
                const double B_rU0_p1 = Br0[indexp1];
                const double B_rU1_p1 = Br1[indexp1];
                const double B_rU2_p1 = Br2[indexp1];

                const double Valenciav_lU0_p1 = Vl0[indexp1];
                const double Valenciav_lU1_p1 = Vl1[indexp1];
                const double B_lU0_p1 = Bl0[indexp1];
                const double B_lU1_p1 = Bl1[indexp1];
                const double B_lU2_p1 = Bl2[indexp1];

                // Calculate the flux vector on each face for each component of the E-field:
                const REAL F0B1_r_p1 = (Valenciav_rU0_p1*B_rU1_p1 - Valenciav_rU1_p1*B_rU0_p1);
                const REAL F0B1_l_p1 = (Valenciav_lU0_p1*B_lU1_p1 - Valenciav_lU1_p1*B_lU0_p1);

                // WRONG (fixme): Compute the state vector for this flux direction
                const REAL U_r_p1 = B_rU2_p1;
                printf("DO NOT USE calculate_E_field_flat_all_in_one-zachsversion.h!\n"); exit(1);
                const REAL U_l_p1 = B_lU2_p1;

                // Basic HLLE solver
                const REAL FHLL_0B1_p1 = 0.5*(F0B1_r_p1 + F0B1_l_p1 - (U_r_p1-U_l_p1));

                Az_rhs[index] += SIGN*0.25*(FHLL_0B1 + FHLL_0B1_p1);
            } // END LOOP: for(int i0=NGHOSTS; i0<NGHOSTS+Nxx0; i0++)
        } // END LOOP: for(int i1=NGHOSTS; i1<NGHOSTS+Nxx1; i1++)
    } // END LOOP: for(int i2=NGHOSTS; i2<NGHOSTS+Nxx2; i2++)
}
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-common.h" #include "toplev.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof (struct c_common_identifier) + 3 * sizeof (void *)) /* Language-specific declaration information. */ struct lang_decl GTY(()) { char dummy; }; /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID) struct lang_type GTY(()) { /* In a RECORD_TYPE, a sorted array of the fields of the type. */ struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields"))) s; /* In an ENUMERAL_TYPE, the min and max values. */ tree enum_min; tree enum_max; /* In a RECORD_TYPE, information specific to Objective-C, such as a list of adopted protocols or a pointer to a corresponding @interface. See objc/objc-act.h for details. */ tree objc_info; }; /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. 
*/ #define C_DECL_DECLARED_BUILTIN(EXP) \ DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) \ DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. */ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP)) /* Record whether a label was defined in a statement expression which has finished and so can no longer be jumped to. */ #define C_DECL_UNJUMPABLE_STMT_EXPR(EXP) \ DECL_LANG_FLAG_6 (LABEL_DECL_CHECK (EXP)) /* Record whether a label was the subject of a goto from outside the current level of statement expression nesting and so cannot be defined right now. */ #define C_DECL_UNDEFINABLE_STMT_EXPR(EXP) \ DECL_LANG_FLAG_7 (LABEL_DECL_CHECK (EXP)) /* Record whether a label was defined in the scope of an identifier with variably modified type which has finished and so can no longer be jumped to. */ #define C_DECL_UNJUMPABLE_VM(EXP) \ DECL_LANG_FLAG_3 (LABEL_DECL_CHECK (EXP)) /* Record whether a label was the subject of a goto from outside the current level of scopes of identifiers with variably modified type and so cannot be defined right now. */ #define C_DECL_UNDEFINABLE_VM(EXP) \ DECL_LANG_FLAG_5 (LABEL_DECL_CHECK (EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 \ || (TYPE_ARG_TYPES (TREE_TYPE (EXP)) == 0 \ && !DECL_BUILT_IN (EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; }; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. 
*/ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier. */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* The specifier itself. */ tree spec; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128 }; /* A sequence of declaration specifiers in C. */ struct c_declspecs { /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ enum c_typespec_keyword typespec_word; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether a type specifier has been seen. */ BOOL_BITFIELD type_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether a struct, union or enum type either had its content defined by a type specifier in the list or was the first visible declaration of its tag. */ BOOL_BITFIELD tag_defined_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p; /* Whether "long" was specified. */ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "__thread" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. 
*/ BOOL_BITFIELD restrict_p : 1; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. */ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ tree tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. */ tree others; /* A list of VLA sizes from the parameters. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator *declarator; location_t id_loc; /* Currently only set for cdk_id. */ union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info *arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs *specs; /* The declarator. */ struct c_declarator *declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs *specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator *declarator; }; /* Save and restore the variables in this file and elsewhere that keep track of the progress of compilation of the current function. Used for nested functions. */ struct language_function GTY(()) { struct c_language_function base; tree x_break_label; tree x_cont_label; struct c_switch * GTY((skip)) x_switch_stack; struct c_arg_info * GTY((skip)) arg_info; int returns_value; int returns_null; int returns_abnormally; int warn_about_return_type; int extern_inline; }; /* Save lists of labels used or defined in particular contexts. Allocated on the parser obstack. */ struct c_label_list { /* The label at the head of the list. */ tree label; /* The rest of the list. */ struct c_label_list *next; }; /* Statement expression context. */ struct c_label_context_se { /* The labels defined at this level of nesting. */ struct c_label_list *labels_def; /* The labels used at this level of nesting. */ struct c_label_list *labels_used; /* The next outermost context. */ struct c_label_context_se *next; }; /* Context of variably modified declarations. */ struct c_label_context_vm { /* The labels defined at this level of nesting. */ struct c_label_list *labels_def; /* The labels used at this level of nesting. */ struct c_label_list *labels_used; /* The scope of this context. 
Multiple contexts may be at the same numbered scope, since each variably modified declaration starts a new context. */ unsigned scope; /* The next outermost context. */ struct c_label_context_vm *next; }; /* in c-parser.c */ extern void c_parse_init (void); /* in c-aux-info.c */ extern void gen_aux_info_record (tree, int, int, int); /* in c-decl.c */ extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern int global_bindings_p (void); extern void push_scope (void); extern tree pop_scope (void); extern void insert_block (tree); extern void c_expand_body (tree); extern void c_init_decl_processing (void); extern void c_dup_lang_specific_decl (tree); extern void c_print_identifier (FILE *, tree, int); extern int quals_from_declspecs (const struct c_declspecs *); extern struct c_declarator *build_array_declarator (tree, struct c_declspecs *, bool, bool); extern tree build_enumerator (tree, tree); extern tree check_for_loop_decls (void); extern void mark_forward_parm_decls (void); extern void declare_parm_level (void); extern void undeclared_variable (tree, location_t); extern tree declare_label (tree); extern tree define_label (location_t, tree); extern void c_maybe_initialize_eh (void); extern void finish_decl (tree, tree, tree); extern tree finish_enum (tree, tree, tree); extern void finish_function (void); extern tree finish_struct (tree, tree, tree); extern struct c_arg_info *get_parm_info (bool); extern tree grokfield (struct c_declarator *, struct c_declspecs *, tree); extern tree groktypename (struct c_type_name *); extern tree grokparm (const struct c_parm *); extern tree implicitly_declare (tree); extern void keep_next_level (void); extern void pending_xref_error (void); extern void c_push_function_context (struct function *); extern void c_pop_function_context (struct function *); extern void push_parm_decl (const struct c_parm *); extern struct c_declarator *set_array_declarator_inner (struct c_declarator *, struct c_declarator *, bool); extern tree builtin_function (const char *, tree, int, enum built_in_class, const char *, tree); extern void shadow_tag (const struct c_declspecs *); extern void shadow_tag_warned (const struct c_declspecs *, int); extern tree start_enum (tree); extern int start_function (struct c_declspecs *, struct c_declarator *, tree); extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool, tree); extern tree start_struct (enum tree_code, tree); extern void store_parm_decls (void); extern void store_parm_decls_from (struct c_arg_info *); extern tree xref_tag (enum tree_code, tree); extern struct c_typespec parser_xref_tag (enum tree_code, tree); extern int c_expand_decl (tree); extern struct c_parm *build_c_parm (struct c_declspecs *, tree, struct c_declarator *); extern struct c_declarator *build_attrs_declarator (tree, struct c_declarator *); extern struct c_declarator *build_function_declarator (struct c_arg_info *, struct c_declarator *); extern struct c_declarator *build_id_declarator (tree); extern struct c_declarator *make_pointer_declarator (struct c_declspecs *, struct c_declarator *); extern struct c_declspecs *build_null_declspecs (void); extern struct c_declspecs *declspecs_add_qual (struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_type (struct c_declspecs *, struct c_typespec); extern struct c_declspecs *declspecs_add_scspec (struct c_declspecs *, tree); extern struct c_declspecs *declspecs_add_attrs (struct c_declspecs *, tree); extern struct c_declspecs 
*finish_declspecs (struct c_declspecs *); /* in c-objc-common.c */ extern int c_disregard_inline_limits (tree); extern int c_cannot_inline_tree_fn (tree *); extern bool c_objc_common_init (void); extern bool c_missing_noreturn_ok_p (tree); extern tree c_objc_common_truthvalue_conversion (tree expr); extern bool c_warn_unused_global_decl (tree); extern void c_initialize_diagnostics (diagnostic_context *); extern bool c_vla_unspec_p (tree x, tree fn); #define c_build_type_variant(TYPE, CONST_P, VOLATILE_P) \ c_build_qualified_type ((TYPE), \ ((CONST_P) ? TYPE_QUAL_CONST : 0) | \ ((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0)) /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern struct c_switch *c_switch_stack; extern struct c_label_context_se *label_context_stack_se; extern struct c_label_context_vm *label_context_stack_vm; extern tree require_complete_type (tree); extern int same_translation_unit_p (tree, tree); extern int comptypes (tree, tree); extern bool c_vla_type_p (tree); extern bool c_mark_addressable (tree); extern void c_incomplete_type_error (tree, tree); extern tree c_type_promotes_to (tree); extern struct c_expr default_function_array_conversion (struct c_expr); extern tree composite_type (tree, tree); extern tree build_component_ref (tree, tree); extern tree build_array_ref (tree, tree); extern tree build_external_ref (tree, int, location_t); extern void pop_maybe_used (bool); extern struct c_expr c_expr_sizeof_expr (struct c_expr); extern struct c_expr c_expr_sizeof_type (struct c_type_name *); extern struct c_expr parser_build_unary_op (enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op (enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr (tree, tree, tree); extern tree build_compound_expr (tree, tree); extern tree c_cast_expr (struct c_type_name *, tree); extern tree build_c_cast (tree, tree); extern void store_init_value (tree, tree); extern void error_init (const char *); extern void pedwarn_init (const char *); extern void maybe_warn_string_init (tree, struct c_expr); extern void start_init (tree, tree, int); extern void finish_init (void); extern void really_start_incremental_init (tree); extern void push_init_level (int); extern struct c_expr pop_init_level (int); extern void set_init_index (tree, tree); extern void set_init_label (tree); extern void process_init_element (struct c_expr); extern tree build_compound_literal (tree, tree); extern tree c_start_case (tree); extern void c_finish_case (tree); extern tree build_asm_expr (tree, tree, tree, tree, bool); extern tree build_asm_stmt (tree, tree); extern tree c_convert_parm_for_inlining (tree, tree, tree, int); extern int c_types_compatible_p (tree, tree); extern tree c_begin_compound_stmt (bool); extern tree c_end_compound_stmt (tree, bool); extern void c_finish_if_stmt (location_t, tree, tree, tree, bool); extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr (void); extern tree c_finish_stmt_expr (tree); extern tree c_process_expr_stmt (tree); extern tree c_finish_expr_stmt (tree); extern tree c_finish_return (tree); extern tree c_finish_bc_stmt (tree *, bool); extern tree c_finish_goto_label (tree); extern tree c_finish_goto_ptr (tree); extern void c_begin_vm_scope (unsigned int); extern void c_end_vm_scope (unsigned int); extern tree c_expr_to_decl (tree, bool *, bool *, bool *); extern tree c_begin_omp_parallel (void); extern tree c_finish_omp_parallel (tree, tree); extern 
tree c_finish_omp_clauses (tree); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* Nonzero means we are reading code that came from a system header file. */ extern int system_header_p; /* True means global_bindings_p should return false even if the scope stack says we are in file scope. */ extern bool c_override_global_bindings_to_false; /* True means we've initialized exception handling. */ extern bool c_eh_initialized_p; /* In c-decl.c */ extern void c_finish_incomplete_decl (tree); extern void c_write_global_declarations (void); /* In order for the format checking to accept the C frontend diagnostic framework extensions, you must include this file before toplev.h, not after. */ #if GCC_VERSION >= 4001 #define ATTRIBUTE_GCC_CDIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m ,n))) ATTRIBUTE_NONNULL(m) #else #define ATTRIBUTE_GCC_CDIAG(m, n) ATTRIBUTE_NONNULL(m) #endif extern void pedwarn_c90 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2); extern void pedwarn_c99 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2); #endif /* ! GCC_C_TREE_H */
rendezvous.c
#include <stdio.h>
#include <unistd.h>
#include <omp.h>

typedef struct printer printer;
struct printer { int id, ink; };

printer pnt_main   = { 1, 5 };
printer pnt_backup = { 2, 5 };

/* Print one line on whichever printer still has ink; the critical
   section serializes access to the shared printers. */
int print(const char *text, const char **error)
{
    #pragma omp critical
    {
        printer *p = &pnt_main;
        if (!p->ink) p = &pnt_backup;
        if (!p->ink) *error = "Out of ink";
        else {
            *error = 0;
            p->ink--;
            printf("%d | ", p->id);
            while (*text != '\0') {
                putchar(*(text++));
                fflush(stdout);
                usleep(30000);
            }
            putchar('\n');
        }
    }
    return 0 != *error;
}

const char *humpty[] = {
    "Humpty Dumpty sat on a wall.",
    "Humpty Dumpty had a great fall.",
    "All the king's horses and all the king's men,",
    "Couldn't put Humpty together again."
};

const char *goose[] = {
    "Old Mother Goose,",
    "When she wanted to wander,",
    "Would ride through the air,",
    "On a very fine gander.",
    "Jack's mother came in,",
    "And caught the goose soon,",
    "And mounting its back,",
    "Flew up to the moon."
};

int main()
{
    int i, j, len;
    const char *msg, **text;

    omp_set_num_threads(2);

    #pragma omp parallel for private(text, msg, len, j)
    for (i = 0; i < 2; i++) {
        text = i ? goose : humpty;
        len = (i ? sizeof(goose) : sizeof(humpty)) / sizeof(const char*);

        for (j = 0; j < len; j++) {
            usleep(100000);
            if (print(text[j], &msg)) {
                fprintf(stderr, "Error: %s\n", msg);
                break;
            }
        }
    }

    return 0;
}
Example_get_nthrs.2.c
/*
 * @@name:       get_nthrs.2c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   no
 * @@expect:     success
 */
#include <omp.h>

void work(int i);

void correct()
{
  int i;
  #pragma omp parallel private(i)
  {
    i = omp_get_thread_num();
    work(i);
  }
}
ILS3D.h
/* ################################################################################### # # BCMTools # # Copyright (c) 2011-2014 Institute of Industrial Science, The University of Tokyo. # All rights reserved. # # Copyright (c) 2012-2016 Advanced Institute for Computational Science (AICS), RIKEN. # All rights reserved. # # Copyright (c) 2017 Research Institute for Information Technology (RIIT), Kyushu University. # All rights reserved. # ################################################################################### */ #ifndef ILS3D_H #define ILS3D_H #include <cmath> #include <cfloat> #include "BlockManager.h" #include "LocalScalar3D.h" #include "real.h" #include "blas.h" #include "comm.h" #include "PM.h" class ILS3D { public: ILS3D() { } ~ILS3D() { } private: bool IsConverged( BlockManager& blockManager, real& residual, real& rr, real& bb, real& epsilon, int count, int countMax) { residual = (fabs(bb)>FLT_MIN) ? fabs(rr)/fabs(bb) : fabs(rr); if( residual <= epsilon*epsilon ) { return true; } if( isnan(residual) ) { std::cout << bb << " " << rr << std::endl; Exit(0); } if( isinf(residual) ) { Exit(0); } if( count == countMax ) { } return false; } void Jacobi_Smoother( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, real omega) { int vc = plsx->GetVC(); PM_Start(tm_JacobiSmoother, 0, 0, true); int NB = blockManager.getNumBlock(); BlockBase* block0 = blockManager.getBlock(0); ::Vec3i size = block0->getSize(); int NX = size.x; int NY = size.y; int NZ = size.z; int sz[3] = {NX, NY, NZ}; PM_Start(tm_JacobiSmoother_Calc, 0, 0, true); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<NB; ++n) { BlockBase* block = blockManager.getBlock(n); real* x = plsx ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* b = plsb ->GetBlockData(block); real* x0 = plsx0->GetBlockData(block); real pomega = omega; jacobi_smoother_( x0, x, Ap, Aw, Ae, As, An, Ab, At, b, &pomega, sz, &vc); } PM_Stop(tm_JacobiSmoother_Calc, 0, 0, 16.0*NX*NY*NZ, NB); PM_Start(tm_JacobiSmoother_Swap, 0, 0, true); LSSwap(*plsx, *plsx0); PM_Stop(tm_JacobiSmoother_Swap); PM_Start(tm_JacobiSmoother_Comm, 0, 0, true); plsx->ImposeBoundaryCondition(blockManager); PM_Stop(tm_JacobiSmoother_Comm); PM_Stop(tm_JacobiSmoother); } void CalcAx( BlockManager& blockManager, LocalScalar3D<real>* plsAx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsx) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* Ax = plsAx->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); 
real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* x = plsx ->GetBlockData(block); calc_ax_( Ax, Ap, Aw, Ae, As, An, Ab, At, x, sz, &vc); } } void CalcR( BlockManager& blockManager, LocalScalar3D<real>* plsr, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsb) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* r = plsr ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* x = plsx ->GetBlockData(block); real* b = plsb ->GetBlockData(block); calc_r_( r, Ap, Aw, Ae, As, An, Ab, At, x, b, sz, &vc); } } void CalcR2( BlockManager& blockManager, real& rr, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsb) { int vc = plsx->GetVC(); double rr_local = 0.0; #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for reduction(+: rr_local) #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* x = plsx ->GetBlockData(block); real* b = plsb ->GetBlockData(block); real rr_block = 0.0; calc_r2_( &rr_block, Ap, Aw, Ae, As, An, Ab, At, x, b, sz, &vc); rr_local += rr_block; // std::cout << rr_block << std::endl; } const MPI::Intracomm& comm = blockManager.getCommunicator(); double rr_global = 0.0; allreduce_(&rr_global, &rr_local); /* #ifdef _REAL_IS_DOUBLE_ comm.Allreduce(&rr_local, &rr_global, 1, MPI_DOUBLE_PRECISION, MPI_SUM); #else comm.Allreduce(&rr_local, &rr_global, 1, MPI_FLOAT, MPI_SUM); #endif */ rr = rr_global; } void DOT( BlockManager& blockManager, real& xy, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsy) { int vc = plsx->GetVC(); double xy_local = 0.0; #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for reduction(+: xy_local) #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); real xy_block = 0.0; dot_(&xy_block, x, y, sz, &vc); xy_local += xy_block; } const MPI::Intracomm& comm = blockManager.getCommunicator(); double xy_global = 0.0; allreduce_(&xy_global, &xy_local); /* #ifdef _REAL_IS_DOUBLE_ 
comm.Allreduce(&xy_local, &xy_global, 1, MPI_DOUBLE_PRECISION, MPI_SUM); #else comm.Allreduce(&xy_local, &xy_global, 1, MPI_FLOAT, MPI_SUM); #endif */ xy = xy_global; } void AXPY( BlockManager& blockManager, LocalScalar3D<real>* plsy, LocalScalar3D<real>* plsx, real a) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* y = plsy->GetBlockData(block); real* x = plsx->GetBlockData(block); axpy_(y, x, &a, sz, &vc); } } void XPAY( BlockManager& blockManager, LocalScalar3D<real>* plsy, LocalScalar3D<real>* plsx, real a) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* y = plsy->GetBlockData(block); real* x = plsx->GetBlockData(block); xpay_(y, x, &a, sz, &vc); } } void AXPYZ( BlockManager& blockManager, LocalScalar3D<real>* plsz, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsy, real a) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* z = plsz->GetBlockData(block); real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); axpyz_(z, x, y, &a, sz, &vc); } } void AXPBYPZ( BlockManager& blockManager, LocalScalar3D<real>* plsz, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsy, real a, real b) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* z = plsz->GetBlockData(block); real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); axpbypz_(z, x, y, &a, &b, sz, &vc); } } public: void Fill( BlockManager& blockManager, LocalScalar3D<real>* plsx, real value) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx->GetBlockData(block); real v = value; fill_(x, &v, sz, &vc); } } void Copy( BlockManager& blockManager, LocalScalar3D<real>* plsy, LocalScalar3D<real>* plsx) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); copy_(y, x, sz, &vc); } } void Jacobi_PreConditioner( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, 
LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, real omega, int countPreConditioner ) { for(int count=0; count<countPreConditioner; count++) { Jacobi_Smoother( blockManager, plsx, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsb, plsx0, omega); } } void Jacobi( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, real omega, int countMax, real epsilon, int& count, real& residual) { real bb = 0.0; DOT(blockManager, bb, plsb, plsb); for(count=1; count<=countMax; ++count) { Jacobi_Smoother( blockManager, plsx, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsb, plsx0, omega); real rr = 0.0; CalcR2( blockManager, rr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } } plsx->ImposeBoundaryCondition(blockManager); } void CG( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsr, LocalScalar3D<real>* plsp, LocalScalar3D<real>* plsq, LocalScalar3D<real>* plsz, LocalScalar3D<real>* plsx0, real omega, int countPreConditioner, int countMax, real epsilon, int& count, real& residual) { real bb = 0.0; DOT(blockManager, bb, plsb, plsb); if( fabs(bb) < FLT_MIN ) { residual = 0.0; count = 0; return; } CalcR( blockManager, plsr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); plsr->ImposeBoundaryCondition(blockManager); real rr0 = 1.0; real rr1 = 0.0; for(count=1; count<=countMax; ++count) { Fill(blockManager, plsz, 0.0); Jacobi_PreConditioner( blockManager, plsz, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsr, plsx0, omega, countPreConditioner); rr1 = 0.0; DOT( blockManager, rr1, plsr, plsz); if( fabs(rr1) < FLT_MIN ) { residual = rr1; count = 0; break; } real beta = rr1/rr0; if( count==1 ) { Copy( blockManager, plsp, plsz); } else { XPAY( blockManager, plsp, plsz, beta); } plsp->ImposeBoundaryCondition(blockManager); CalcAx( blockManager, plsq, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp); real qp = 0.0; DOT( blockManager, qp, plsq, plsp); real alpha = rr1/qp; AXPY( blockManager, plsx, plsp, alpha); AXPY( blockManager, plsr, plsq, -alpha); plsr->ImposeBoundaryCondition(blockManager); real rr = 0.0; DOT( blockManager, rr, plsr, plsr); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } rr0 = rr1; } plsx->ImposeBoundaryCondition(blockManager); } void BiCGSTAB( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsr, LocalScalar3D<real>* plsr0, LocalScalar3D<real>* plsp, LocalScalar3D<real>* plsp_, LocalScalar3D<real>* plsq_, LocalScalar3D<real>* plss, LocalScalar3D<real>* plss_, 
LocalScalar3D<real>* plst_, LocalScalar3D<real>* plsx0, real omega, int countPreConditioner, int countMax, real epsilon, int& count, real& residual) { real bb = 0.0; DOT(blockManager, bb, plsb, plsb); if( fabs(bb) < FLT_MIN ) { residual = 0.0; count = 0; return; } CalcR( blockManager, plsr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); Copy( blockManager, plsr0, plsr); real rr0 = 1.0; real alpha = 0.0; real gamma = 1.0; for(count=1; count<=countMax; ++count) { real rr1 = 0.0; DOT(blockManager, rr1, plsr, plsr0); if( fabs(rr1) < FLT_MIN ) { residual = rr1; count = 0; break; } if( count == 1 ) { Copy( blockManager, plsp, plsr); } else { real beta = rr1/rr0*alpha/gamma; AXPY( blockManager, plsp, plsq_, -gamma); XPAY( blockManager, plsp, plsr, beta); } plsp->ImposeBoundaryCondition(blockManager); Fill(blockManager, plsp_, 0.0); Jacobi_PreConditioner( blockManager, plsp_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp, plsx0, omega, countPreConditioner); CalcAx( blockManager, plsq_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp_); real q_r0 = 0.0; DOT( blockManager, q_r0, plsq_, plsr0); alpha = rr1/q_r0; AXPYZ( blockManager, plss, plsq_, plsr, -alpha); plss->ImposeBoundaryCondition(blockManager); Fill(blockManager, plss_, 0.0); Jacobi_PreConditioner( blockManager, plss_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plss, plsx0, omega, countPreConditioner); CalcAx( blockManager, plst_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plss_); real t_s = 0.0; DOT( blockManager, t_s, plst_, plss); real t_t_ = 0.0; DOT( blockManager, t_t_, plst_, plst_); gamma = t_s/t_t_; AXPBYPZ( blockManager, plsx, plsp_, plss_, alpha, gamma); AXPYZ( blockManager, plsr, plst_, plss, -gamma); real rr = 0.0; DOT( blockManager, rr, plsr, plsr); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } rr0 = rr1; } plsx->ImposeBoundaryCondition(blockManager); } void Jacobi_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega, int countMax, real epsilon, int& count, real& residual) { real bb = 0.0; DOT(blockManager, bb, plsb, plsb); for(count=1; count<=countMax; ++count) { Jacobi_Smoother_Mask( blockManager, plsx, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsb, plsx0, plsMaskId, omega); real rr = 0.0; CalcR2( blockManager, rr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } } plsx->ImposeBoundaryCondition(blockManager); } void BiCGSTAB_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsr, LocalScalar3D<real>* plsr0, LocalScalar3D<real>* plsp, LocalScalar3D<real>* plsp_, LocalScalar3D<real>* plsq_, LocalScalar3D<real>* plss, LocalScalar3D<real>* plss_, LocalScalar3D<real>* plst_, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega, int countPreConditioner, int countMax, real epsilon, int& count, real& residual) { real bb 
= 0.0; DOT_Mask(blockManager, bb, plsb, plsb, plsMaskId); if( fabs(bb) < FLT_MIN ) { residual = 0.0; count = 0; return; } CalcR( blockManager, plsr, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsx, plsb); Copy( blockManager, plsr0, plsr); real rr0 = 1.0; real alpha = 0.0; real gamma = 1.0; for(count=1; count<=countMax; ++count) { real rr1 = 0.0; DOT_Mask(blockManager, rr1, plsr, plsr0, plsMaskId); if( fabs(rr1) < FLT_MIN ) { residual = rr1; count = 0; break; } if( count == 1 ) { Copy( blockManager, plsp, plsr); } else { real beta = rr1/rr0*alpha/gamma; AXPY( blockManager, plsp, plsq_, -gamma); XPAY( blockManager, plsp, plsr, beta); } plsp->ImposeBoundaryCondition(blockManager); Fill(blockManager, plsp_, 0.0); Jacobi_PreConditioner_Mask( blockManager, plsp_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp, plsx0, plsMaskId, omega, countPreConditioner); CalcAx_Mask( blockManager, plsq_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsp_, plsMaskId); real q_r0 = 0.0; DOT_Mask( blockManager, q_r0, plsq_, plsr0, plsMaskId); alpha = rr1/q_r0; AXPYZ( blockManager, plss, plsq_, plsr, -alpha); plss->ImposeBoundaryCondition(blockManager); Fill(blockManager, plss_, 0.0); Jacobi_PreConditioner_Mask( blockManager, plss_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plss, plsx0, plsMaskId, omega, countPreConditioner); CalcAx_Mask( blockManager, plst_, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plss_, plsMaskId); real t_s = 0.0; DOT_Mask( blockManager, t_s, plst_, plss, plsMaskId); real t_t_ = 0.0; DOT_Mask( blockManager, t_t_, plst_, plst_, plsMaskId); gamma = t_s/t_t_; AXPBYPZ( blockManager, plsx, plsp_, plss_, alpha, gamma); AXPYZ( blockManager, plsr, plst_, plss, -gamma); real rr = 0.0; DOT_Mask( blockManager, rr, plsr, plsr, plsMaskId); bool bResult = IsConverged( blockManager, residual, rr, bb, epsilon, count, countMax); if( bResult == true ) { break; } rr0 = rr1; } plsx->ImposeBoundaryCondition(blockManager); } void DOT_Mask( BlockManager& blockManager, real& xy, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsy, LocalScalar3D<int>* plsMaskId) { PM_Start(tm_DOT, 0, 0, true); int vc = plsx->GetVC(); double xy_local = 0.0; int NB = blockManager.getNumBlock(); BlockBase* block0 = blockManager.getBlock(0); ::Vec3i size = block0->getSize(); int NX = size.x; int NY = size.y; int NZ = size.z; int sz[3] = {size.x, size.y, size.z}; PM_Start(tm_DOT_Calc, 0, 0, true); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for reduction(+: xy_local) #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); real* x = plsx->GetBlockData(block); real* y = plsy->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); real xy_block = 0.0; dot_mask_(&xy_block, x, y, mask, sz, &vc); xy_local += xy_block; } PM_Stop(tm_DOT_Calc, 0, 0, 2.0*NX*NY*NZ, NB); const MPI::Intracomm& comm = blockManager.getCommunicator(); double xy_global = 0.0; PM_Start(tm_DOT_Comm, 0, 0, true); allreduce_(&xy_global, &xy_local); PM_Stop(tm_DOT_Comm); xy = xy_global; PM_Stop(tm_DOT); } void Jacobi_PreConditioner_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega, int countPreConditioner ) { for(int count=0; count<countPreConditioner; count++) { Jacobi_Smoother_Mask( 
blockManager, plsx, plsAp, plsAw, plsAe, plsAs, plsAn, plsAb, plsAt, plsb, plsx0, plsMaskId, omega); } } void Jacobi_Smoother_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega) { int vc = plsx->GetVC(); PM_Start(tm_JacobiSmoother, 0, 0, true); int NB = blockManager.getNumBlock(); BlockBase* block0 = blockManager.getBlock(0); ::Vec3i size = block0->getSize(); int NX = size.x; int NY = size.y; int NZ = size.z; int sz[3] = {size.x, size.y, size.z}; PM_Start(tm_JacobiSmoother_Calc, 0, 0, true); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); real* x = plsx ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* b = plsb ->GetBlockData(block); real* x0 = plsx0->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); real pomega = omega; jacobi_smoother_mask_( x0, x, Ap, Aw, Ae, As, An, Ab, At, b, mask, &pomega, sz, &vc); } PM_Stop(tm_JacobiSmoother_Calc, 0, 0, 16.0*NX*NY*NZ, NB); PM_Start(tm_JacobiSmoother_Swap, 0, 0, true); LSSwap(*plsx, *plsx0); PM_Stop(tm_JacobiSmoother_Swap); PM_Start(tm_JacobiSmoother_Comm, 0, 0, true); plsx->ImposeBoundaryCondition(blockManager); PM_Stop(tm_JacobiSmoother_Comm); PM_Stop(tm_JacobiSmoother); } void Jacobi_Smoother_Mask_2( BlockManager& blockManager, LocalScalar3D<real>* plsx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsb, LocalScalar3D<real>* plsx0, LocalScalar3D<int>* plsMaskId, real omega) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* b = plsb ->GetBlockData(block); real* x0 = plsx0->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); real pomega = omega; jacobi_smoother_mask_( x0, x, Ap, Aw, Ae, As, An, Ab, At, b, mask, &pomega, sz, &vc); } plsx0->UpdateVirtualCells(blockManager); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* x = plsx ->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = 
plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* b = plsb ->GetBlockData(block); real* x0 = plsx0->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); real pomega = omega; jacobi_smoother_mask_( x, x0, Ap, Aw, Ae, As, An, Ab, At, b, mask, &pomega, sz, &vc); } plsx->ImposeBoundaryCondition(blockManager); } void CalcAx_Mask( BlockManager& blockManager, LocalScalar3D<real>* plsAx, LocalScalar3D<real>* plsAp, LocalScalar3D<real>* plsAw, LocalScalar3D<real>* plsAe, LocalScalar3D<real>* plsAs, LocalScalar3D<real>* plsAn, LocalScalar3D<real>* plsAb, LocalScalar3D<real>* plsAt, LocalScalar3D<real>* plsx, LocalScalar3D<int>* plsMaskId) { int vc = plsx->GetVC(); #ifdef _BLOCK_IS_LARGE_ #else #pragma omp parallel for #endif for (int n=0; n<blockManager.getNumBlock(); ++n) { BlockBase* block = blockManager.getBlock(n); ::Vec3i blockSize = block->getSize(); ::Vec3d cellSize = block->getCellSize(); int sz[3] = {blockSize.x, blockSize.y, blockSize.z}; real* Ax = plsAx->GetBlockData(block); real* Ap = plsAp->GetBlockData(block); real* Aw = plsAw->GetBlockData(block); real* Ae = plsAe->GetBlockData(block); real* As = plsAs->GetBlockData(block); real* An = plsAn->GetBlockData(block); real* Ab = plsAb->GetBlockData(block); real* At = plsAt->GetBlockData(block); real* x = plsx ->GetBlockData(block); int* mask = plsMaskId->GetBlockData(block); calc_ax_mask_( Ax, Ap, Aw, Ae, As, An, Ab, At, x, mask, sz, &vc); } } }; #endif
exception.h
/** * @file exception.h - framework for exceptions in PALISADE * @author TPOC: contact@palisade-crypto.org * * @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. THIS SOFTWARE IS * PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #ifndef SRC_CORE_LIB_UTILS_EXCEPTION_H_ #define SRC_CORE_LIB_UTILS_EXCEPTION_H_ #include <exception> #include <iostream> #include <mutex> #include <stdexcept> #include <string> namespace lbcrypto { // Exceptions thrown inside of a critical region, or inside of an omp thread, // must be caught in the same thread where thrown, or Bad Things Happen // // This class is used to catch and rethrow exceptions from threads/critical // regions (thank you stack overflow) class ThreadException { std::exception_ptr Ptr; std::mutex Lock; public: ThreadException() : Ptr(nullptr) {} ~ThreadException() {} void Rethrow() { if (this->Ptr) std::rethrow_exception(this->Ptr); } void CaptureException() { std::unique_lock<std::mutex> guard(this->Lock); this->Ptr = std::current_exception(); } template <typename Function, typename... Parameters> void Run(Function f, Parameters... params) { try { f(params...); } catch (...) { CaptureException(); } } }; // how to use ThreadException // To use this, declare an instance of the object before the critical // region/thread, catch exceptions in thread with CaptureException, then after // the region call object.Rethrow() // #pragma omp parallel for // for (unsigned i = 0; i < rv.size(); i++) try { // rv.polys[i] = (polys[i].*f)(); // } catch (...) { // e.CaptureException(); // } // e.Rethrow(); // // // use of Run looks like: // ThreadException e; // #pragma omp parallel for // for (int i = 0; i < n; i++) { // e.Run([=] { // // code that might throw // // ... 
// }); // } // e.Rethrow(); class palisade_error : public std::runtime_error { std::string filename; int linenum; std::string message; public: palisade_error(const std::string& file, int line, const std::string& what) : std::runtime_error(what), filename(file), linenum(line) { message = filename + ":" + std::to_string(linenum) + " " + what; } const char* what() const throw() { return message.c_str(); } const std::string& GetFilename() const { return filename; } const int GetLinenum() const { return linenum; } }; class config_error : public palisade_error { public: config_error(const std::string& file, int line, const std::string& what) : palisade_error(file, line, what) {} }; class math_error : public palisade_error { public: math_error(const std::string& file, int line, const std::string& what) : palisade_error(file, line, what) {} }; class not_implemented_error : public palisade_error { public: not_implemented_error(const std::string& file, int line, const std::string& what) : palisade_error(file, line, what) {} }; class not_available_error : public palisade_error { public: not_available_error(const std::string& file, int line, const std::string& what) : palisade_error(file, line, what) {} }; class type_error : public palisade_error { public: type_error(const std::string& file, int line, const std::string& what) : palisade_error(file, line, what) {} }; // use this error when serializing palisade objects class serialize_error : public palisade_error { public: serialize_error(const std::string& file, int line, const std::string& what) : palisade_error(file, line, what) {} }; // use this error when deserializing palisade objects class deserialize_error : public palisade_error { public: deserialize_error(const std::string& file, int line, const std::string& what) : palisade_error(file, line, what) {} }; #define PALISADE_THROW(exc, expr) throw exc(__FILE__, __LINE__, (expr)) } // namespace lbcrypto #endif /* SRC_CORE_LIB_UTILS_EXCEPTION_H_ */
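A compact illustration of the ThreadException pattern documented in the header above: a self-contained sketch (hypothetical driver, not part of PALISADE) that captures an exception thrown inside an OpenMP loop and rethrows it after the parallel region.

#include <iostream>
#include <stdexcept>
#include <vector>
#include "exception.h"   // the header above, assumed to be on the include path

int main() {
  std::vector<int> v(100, 1);
  lbcrypto::ThreadException e;

  #pragma omp parallel for
  for (int i = 0; i < (int)v.size(); i++) {
    // Run() catches anything thrown by the lambda in the throwing thread.
    e.Run([&] {
      if (i == 42) throw std::runtime_error("bad element");  // must not escape the thread
      v[i] = 2 * v[i];
    });
  }

  try {
    e.Rethrow();  // re-raises a captured exception on the calling thread
  } catch (const std::exception& ex) {
    std::cerr << "caught after parallel region: " << ex.what() << "\n";
  }
  return 0;
}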
GB_unop__trunc_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__trunc_fp32_fp32) // op(A') function: GB (_unop_tran__trunc_fp32_fp32) // C type: float // A type: float // cast: float cij = aij // unaryop: cij = truncf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = truncf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = truncf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TRUNC || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__trunc_fp32_fp32) ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = truncf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = truncf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__trunc_fp32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
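An illustrative call of the generated apply kernel above on a plain dense array (Ab == NULL means the matrix is full, i.e. not bitmap). In normal use this kernel is selected internally by the library's apply path rather than called by hand; the demo function below is only a sketch of the kernel's contract.

#include "GB.h"
#include "GB_unop__include.h"

void demo_trunc_fp32 (void)
{
    float A [4] = { 1.9f, -2.7f, 0.5f, -0.1f } ;
    float C [4] ;
    // apply cij = truncf (aij) entrywise over 4 entries with 1 thread
    GrB_Info info = GB (_unop_apply__trunc_fp32_fp32) (C, A, NULL, 4, 1) ;
    // on success C holds { 1.0f, -2.0f, 0.0f, -0.0f };
    // info is GrB_NO_VALUE instead if the operator was compiled out (GB_DISABLE)
    (void) info ;
}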
tree-traverse-alg-openmp3x.c
/********************************************************************************* OpenMP-3.0 Example Codes Beta-v1.0 File : tree-traverse-alg-openmp3x.c Date : Aug 2011 Description : The example program demonstrates the use of the OpenMP-3.0 task construct to parallelize a hierarchical algorithm (inorder tree traversal) and compares it with the OpenMP-2.5 approach using the single construct, displaying the time taken by both approaches. a) inorderTraverse_Task() : Uses the OpenMP-3.0 task construct, which is an efficient approach. Whenever a thread encounters a task construct, a new explicit task is generated; an explicit task may be executed by any thread in the current team, in parallel with other tasks. In this approach several tasks can be executed in parallel. b) inorderTraverse_Wtask() : Uses the OpenMP-2.5 parallel sections directive. Whenever a thread encounters the parallel region it creates a team of threads. When a thread in the team encounters a section directive it executes that section region. This approach can be costly, however, because of the overhead of parallel region creation, the risk of oversubscribing system resources, difficulties in load balancing, and different behaviors of different implementations. OpenMP pragma/ Directive used : #pragma omp parallel #pragma omp section #pragma omp task Input : - Number of nodes in the tree - Number of threads to be used Output : Time taken by both approaches. *********************************************************************************/ /* Header file inclusion */ # include<stdio.h> # include<stdlib.h> # include <omp.h> /* Global variable declaration */ long int totalNodes,numThreads; FILE *fp; /* Structure defining a tree node */ struct node { int info ; struct node *left; struct node *right; }*root,*p,*q; /* Function declaration */ struct node *createTree(); void leftNode(struct node * ,int ); void rightNode(struct node * ,int ); struct node *createNewNode(int); void inorderTraverse_Task(struct node *); void inorderTraverse_Wtask(struct node *); /* Main Function */ int main(int argc,char **argv) { int index=0, value,count=1; double start_time,end_time; /* Checking for command line arguments */ if( argc != 3 ){ printf("\t\t Very Few Arguments\n "); printf("\t\t Syntax : exec <total-nodes> <No.
of Threads>\n"); exit(-1); } /* Initalizing the Number of nodes in the tree and the Number of threads*/ totalNodes =atoi(argv[1]); numThreads =atoi(argv[2]); if(totalNodes<=0) { printf("\n\t Error : Number of nodes should be greater then 0\n"); exit(-1); } /* Creating the data input file */ if((fp = fopen("tree-input.dat","w"))==NULL) { printf("\n\t Failed to open the file \n"); exit(-1); } /* Writing the input data to the file */ for (index=0; index<totalNodes ; index++) fprintf(fp,"%d ",rand()); fclose(fp); /* Function call to create the tree */ root = createTree(); start_time=omp_get_wtime(); /* Creating the parallel region */ #pragma omp parallel num_threads(numThreads) { /* Restricting the one thread to do the work */ #pragma omp single inorderTraverse_Task(root); /* Function call to perform the tree * traversal using task construct */ } /* end of parallel region */ end_time=omp_get_wtime(); printf("\n\t\t Total Nodes : %ld ",totalNodes); printf("\n\t\t Number of Threads : %ld ",numThreads); printf("\n\t\t Time Taken ( Task Construct : OpenMp-3.0 ) : %lf sec",(end_time-start_time)); start_time=omp_get_wtime(); /* function call to perform the tree * traversal without task construct */ inorderTraverse_Wtask(root); end_time=omp_get_wtime(); printf("\n\t\t Time Taken (Parallel Section Directive : OpenMP-2.5) : %lf sec ",(end_time-start_time)); printf("\n\n\t Inorder tree traversal using Task Construct .................Done \n"); return 0; }/* End of main */ /* Description : Function to create the Binary Search tree @return : Retun the pointer to the root node */ struct node *createTree() { int index,value; /* Open the file to read the input data */ if((fp = fopen("tree-input.dat","r"))==NULL) { printf("\n\t Failed to open the file \n"); exit(-1); } index=totalNodes; /* reading the first element */ fscanf(fp,"%d",&value); /* Function call to create the first Node of the tree */ root=createNewNode(value); /* Iterate over the loop to create the tree */ while (index >1 ) { /* Reading the data from the input file */ fscanf(fp,"%d",&value); p=root; q=root; if( value == p->info) continue; else { /* Check the condition for node insertion in right or left */ while(value!=p->info && q!=NULL) { p=q; if(value < p->info ) q = p->left; else q = p->right; } if( value < p->info) { /* If the value is less then the node value then insert the value in left */ leftNode(p,value); } else { /* If the value is greater then the node value then insert the value in right */ rightNode(p,value); } index--; } } /* End of while loop */ fclose(fp); return root; } /* End of Function */ /* Description : Helper function to create the new node @param[value] : Data value of the node */ struct node *createNewNode(int value ) { struct node *newnode; /* Allocating the memory to create the new node */ if((newnode=(struct node *)malloc(sizeof(struct node)))==NULL){ perror("\n\t Memory allocation for newnode "); exit(-1); } newnode->info=value; newnode->right=newnode->left=NULL; return(newnode); } /* Description : Function to create the left node of the tree @param [*r] : Position to insert the node @param[value : data value in the node */ void leftNode(struct node *r,int value ) { if(r->left!=NULL) printf("\n Invalid !"); else r->left=createNewNode(value); } /* Description : Function to create the right node of the tree @param [*r] : Position to insert the node @param[value : data value in the node */ void rightNode(struct node *r,int value) { if(r->right!=NULL) printf("\n Invalid !"); else r->right=createNewNode(value); } /* 
Description : Uses the OpenMP-3.0 task construct, which is an efficient approach. Whenever a thread encounters a task construct, a new explicit task is generated; an explicit task may be executed by any thread in the current team, in parallel with other tasks. In this approach several tasks can be executed in parallel. @param : starting pointer of the tree */ void inorderTraverse_Task(struct node *r) { /* Generating task to traverse Left Subtree by the team of threads*/ if(r->left!=NULL){ #pragma omp task inorderTraverse_Task(r->left); } /* Generating task to traverse Right Subtree by the team of threads*/ if(r->right!=NULL){ #pragma omp task inorderTraverse_Task(r->right); } } /* Description : Uses the OpenMP-2.5 parallel sections directive. Whenever a thread encounters the parallel region it creates a team of threads. When a thread in the team encounters a section directive it executes that section region. The sections are then executed by different threads in parallel. This approach can be costly, however, because of the overhead of parallel region creation, the risk of oversubscribing system resources, difficulties in load balancing, and different behaviors of different implementations. @param : starting pointer of the tree */ void inorderTraverse_Wtask(struct node *r) { omp_set_num_threads(numThreads); /* Create the parallel region */ //#pragma omp parallel sections num_threads(2) #pragma omp parallel sections { /* Creating section to traverse the Left subtree by a thread 1 */ #pragma omp section if(r->left!=NULL){ inorderTraverse_Wtask(r->left); } /* Creating section to traverse the Right subtree by a thread 2 */ #pragma omp section if(r->right!=NULL){ inorderTraverse_Wtask(r->right); } } }
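A compact sketch of the single + task pattern described above, with two illustrative additions that are not in the original program: an explicit per-node visit step (here just an atomic counter) and a taskwait so each task waits for the child tasks it spawned. The names tnode, visited, and run_traversal are hypothetical.

#include <omp.h>
#include <stdio.h>

struct tnode { int info; struct tnode *left, *right; };

static long visited = 0;                 /* shared counter, updated atomically */

static void traverse_task(struct tnode *r) {
    if (r == NULL) return;
#pragma omp task
    traverse_task(r->left);              /* child task for the left subtree  */
#pragma omp atomic
    visited++;                           /* "visit" the current node          */
#pragma omp task
    traverse_task(r->right);             /* child task for the right subtree */
#pragma omp taskwait                     /* wait for both child tasks         */
}

void run_traversal(struct tnode *root, int nthreads) {
#pragma omp parallel num_threads(nthreads)
    {
#pragma omp single                       /* one thread seeds the task tree */
        traverse_task(root);
    }
    printf("visited %ld nodes\n", visited);
}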
random_par.c
//********************************************************** // Parallel Pseudo random number generator: // // USAGE: // // The pseudo random sequence is seeded with a range // // void seed(lower_limit, higher_limit) // // and then subsequent calls to the random number generator // generate values in the sequence: // // double drandom() // // A leap frog method is used to assure non-overlapping // sequences for each thread. // // Note: these functions are to be called from inside the // OpenMP parallel region that will use the sequence. // // BACKGROUND: // // We are using a modulus of 2^31-1 and a multiplier from // the Hoaglin LCGs in the following article: // // http://random.mat.sbg.ac.at/~charly/server/node3.html#lcg // // we are using a zero addend just to make the leap frog // algorithm easier to implement. // // HISTORY: // // 9/2008: Written by Tim Mattson by cutting and pasting // from a generator written by Larry Meadows // //*********************************************************** #include <omp.h> static unsigned long long MULTIPLIER = 764261123; static unsigned long long PMOD = 2147483647; static unsigned long long mult_n; double random_low, random_hi; #define MAX_THREADS 128 static unsigned long long pseed[MAX_THREADS][64]; // second dimension pads each thread's seed // to its own cache line to avoid false sharing unsigned long long random_last = 0; #pragma omp threadprivate(random_last) // threadprivate applies to the whole program, not just a single parallel region double drandom() { unsigned long long random_next; double ret_val; // // compute an integer random number from zero to mod // random_next = (unsigned long long)((mult_n * random_last)% PMOD); random_last = random_next; // // shift into preset range // ret_val = ((double)random_next/(double)PMOD)*(random_hi-random_low)+random_low; return ret_val; } // // set the seed, the multiplier and the range // void seed(double low_in, double hi_in) { int i, id, nthreads; unsigned long long iseed; id = omp_get_thread_num(); #pragma omp single { if(low_in < hi_in) { random_low = low_in; random_hi = hi_in; } else { random_low = hi_in; random_hi = low_in; } // // The Leapfrog method ... adjust the multiplier so you stride through // the sequence by increments of "nthreads" and adjust seeds so each // thread starts with the right offset // nthreads = omp_get_num_threads(); iseed = PMOD/MULTIPLIER; // just pick a reasonable seed pseed[0][0] = iseed; mult_n = MULTIPLIER; for (i = 1; i < nthreads; ++i) { iseed = (unsigned long long)((MULTIPLIER * iseed) % PMOD); pseed[i][0] = iseed; mult_n = (mult_n * MULTIPLIER) % PMOD; } } random_last = (unsigned long long) pseed[id][0]; }
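A usage sketch under the constraint stated above: seed() and drandom() are called from inside the parallel region so that every thread picks up its own leapfrogged stream. The Monte Carlo pi estimate and main() are purely illustrative and link against the generator above.

#include <stdio.h>

double drandom(void);
void   seed(double low_in, double hi_in);

int main(void) {
    const long nsamples = 1000000;
    long in_circle = 0;                       // crude pi estimate as a demo

#pragma omp parallel reduction(+ : in_circle)
    {
        seed(0.0, 1.0);                       // per-thread offset + shared leap multiplier
#pragma omp for
        for (long i = 0; i < nsamples; i++) {
            double x = drandom();
            double y = drandom();
            if (x * x + y * y <= 1.0) in_circle++;
        }
    }
    printf("pi ~= %f\n", 4.0 * (double)in_circle / (double)nsamples);
    return 0;
}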
conversion.h
/* * Copyright 2008-2009 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Modifications to this file: * Copyright (c) 2014-2015, The University of Queensland * Licensed under the Apache License, Version 2.0. * */ #pragma once #include <cusp/cmath.h> #include <cusp/ell_matrix.h> #include <cusp/exception.h> #include <cusp/detail/host/conversion_utils.h> #include <thrust/fill.h> #include <thrust/extrema.h> #include <thrust/count.h> namespace cusp { namespace detail { namespace host { ///////////////////// // CDS Conversions // ///////////////////// template <typename Matrix1, typename Matrix2> void cds_to_csr(const Matrix1& src, Matrix2& dst) { //typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; size_t num_entries = 0; size_t num_diagonals = src.diagonal_offsets.size(); // count nonzero entries for (size_t i = 0; i < num_diagonals; i++) { num_entries += src.block_size*src.block_size*(src.num_rows-cusp::abs(src.diagonal_offsets[i])*src.block_size)/src.block_size; } dst.resize(src.num_rows, src.num_rows, num_entries); num_entries = 0; dst.row_offsets[0] = 0; // copy nonzero entries to CSR structure for(size_t i = 0; i < src.num_rows; i++) { for(size_t n = 0; n < num_diagonals; n++) { if (src.diagonal_offsets[n] < 0 && i < cusp::abs(src.diagonal_offsets[n])*src.block_size) continue; if (src.diagonal_offsets[n] > 0 && i >= src.num_rows-src.diagonal_offsets[n]*src.block_size) continue; for (size_t j = 0; j < src.block_size; j++) { const ValueType value = src.values(i, n*src.block_size+j); dst.column_indices[num_entries] = (src.diagonal_offsets[n]+i/src.block_size)*src.block_size+j; dst.values[num_entries] = value; num_entries++; } } dst.row_offsets[i + 1] = num_entries; } } template <typename Matrix1, typename Matrix2> void cds_to_dia(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; const size_t num_diagonalblocks = src.diagonal_offsets.size(); // number of diagonals for DIA format const size_t num_diagonals = num_diagonalblocks*(2*src.block_size-1); cusp::array1d<IndexType,cusp::host_memory> diagonal_offsets(num_diagonals, 0); size_t num_entries = 0; // count entries (with zero fill-in) for DIA format // and determine diagonal offsets for (size_t i = 0, n = 0; i < num_diagonalblocks; i++) { for (IndexType j = 1-(IndexType)src.block_size; j < (IndexType)src.block_size; j++) { const IndexType offset = src.diagonal_offsets[i] * src.block_size + j; diagonal_offsets[n++] = offset; num_entries += src.num_rows - cusp::abs(offset); } } // prepare destination matrix dst.resize(src.num_rows, src.num_rows, num_entries, num_diagonals); cusp::copy(diagonal_offsets, dst.diagonal_offsets); thrust::fill(dst.values.values.begin(), dst.values.values.end(), ValueType(0)); for (size_t i = 0; i < num_diagonalblocks; i++) { const IndexType k = src.diagonal_offsets[i]*src.block_size; const IndexType first_row = std::max(0, -k); const IndexType num_rows = src.num_rows - cusp::abs(k); 
// row index is identical for CDS and DIA for (IndexType row = first_row; row < first_row+num_rows; row++) { for (IndexType j = 0; j < src.block_size; j++) { // index of "column" in DIA structure const IndexType col = i*(2*src.block_size-1)+(src.block_size-1-row%src.block_size)+j; dst.values(row,col) = src.values(row, i*src.block_size+j); } } } } ///////////////////// // COO Conversions // ///////////////////// template <typename Matrix1, typename Matrix2> void coo_to_csr(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; //typedef typename Matrix2::value_type ValueType; dst.resize(src.num_rows, src.num_cols, src.num_entries); // compute number of non-zero entries per row of A thrust::fill(dst.row_offsets.begin(), dst.row_offsets.end(), IndexType(0)); for (size_t n = 0; n < src.num_entries; n++) dst.row_offsets[src.row_indices[n]]++; // cumsum the num_entries per row to get dst.row_offsets[] IndexType cumsum = 0; for(size_t i = 0; i < src.num_rows; i++) { IndexType temp = dst.row_offsets[i]; dst.row_offsets[i] = cumsum; cumsum += temp; } dst.row_offsets[src.num_rows] = cumsum; // write Aj,Ax into dst.column_indices,dst.values for(size_t n = 0; n < src.num_entries; n++) { IndexType row = src.row_indices[n]; IndexType dest = dst.row_offsets[row]; dst.column_indices[dest] = src.column_indices[n]; dst.values[dest] = src.values[n]; dst.row_offsets[row]++; } IndexType last = 0; for(size_t i = 0; i <= src.num_rows; i++) { IndexType temp = dst.row_offsets[i]; dst.row_offsets[i] = last; last = temp; } //csr may contain duplicates } template <typename Matrix1, typename Matrix2> void coo_to_array2d(const Matrix1& src, Matrix2& dst) { //typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; dst.resize(src.num_rows, src.num_cols); thrust::fill(dst.values.begin(), dst.values.end(), ValueType(0)); for(size_t n = 0; n < src.num_entries; n++) dst(src.row_indices[n], src.column_indices[n]) += src.values[n]; //sum duplicates } ///////////////////// // CSR Conversions // ///////////////////// template <typename Matrix1, typename Matrix2> void csr_to_cds(const Matrix1& src, Matrix2& dst, const size_t alignment = 32) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; // compute number of occupied diagonals and enumerate them size_t num_diagonals = 0; // block size is always 1 for this conversion size_t block_size = 1; cusp::array1d<IndexType,cusp::host_memory> diag_map(src.num_rows + src.num_cols, 0); for(size_t i = 0; i < src.num_rows; i++) { for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i+1]; jj++) { size_t j = src.column_indices[jj]; size_t map_index = (src.num_rows - i) + j; //offset shifted by + num_rows if(diag_map[map_index] == 0) { diag_map[map_index] = 1; num_diagonals++; } } } // allocate CDS structure dst.resize(src.num_rows, src.num_entries, num_diagonals, block_size, alignment); // fill in diagonal_offsets array for(size_t n = 0, diag = 0; n < src.num_rows + src.num_cols; n++) { if(diag_map[n] == 1) { diag_map[n] = diag; dst.diagonal_offsets[diag] = (IndexType) n - (IndexType) src.num_rows; diag++; } } // fill in values array thrust::fill(dst.values.values.begin(), dst.values.values.end(), ValueType(0)); for(size_t i = 0; i < src.num_rows; i++) { for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i+1]; jj++) { size_t j = src.column_indices[jj]; size_t map_index = (src.num_rows - i) + j; //offset shifted by + num_rows size_t diag = diag_map[map_index]; 
dst.values(i, diag) = src.values[jj]; } } } template <typename Matrix1, typename Matrix2> void csr_to_coo(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; //typedef typename Matrix2::value_type ValueType; dst.resize(src.num_rows, src.num_cols, src.num_entries); // TODO replace with offsets_to_indices for(size_t i = 0; i < src.num_rows; i++) for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i + 1]; jj++) dst.row_indices[jj] = i; cusp::copy(src.column_indices, dst.column_indices); cusp::copy(src.values, dst.values); } template <typename Matrix1, typename Matrix2> void csr_to_dia(const Matrix1& src, Matrix2& dst, const size_t alignment = 32) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; // compute number of occupied diagonals and enumerate them size_t num_diagonals = 0; cusp::array1d<IndexType,cusp::host_memory> diag_map(src.num_rows + src.num_cols, 0); for(size_t i = 0; i < src.num_rows; i++) { for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i+1]; jj++) { size_t j = src.column_indices[jj]; size_t map_index = (src.num_rows - i) + j; //offset shifted by + num_rows if(diag_map[map_index] == 0) { diag_map[map_index] = 1; num_diagonals++; } } } // allocate DIA structure dst.resize(src.num_rows, src.num_cols, src.num_entries, num_diagonals, alignment); // fill in diagonal_offsets array for(size_t n = 0, diag = 0; n < src.num_rows + src.num_cols; n++) { if(diag_map[n] == 1) { diag_map[n] = diag; dst.diagonal_offsets[diag] = (IndexType) n - (IndexType) src.num_rows; diag++; } } // fill in values array thrust::fill(dst.values.values.begin(), dst.values.values.end(), ValueType(0)); for(size_t i = 0; i < src.num_rows; i++) { for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i+1]; jj++) { size_t j = src.column_indices[jj]; size_t map_index = (src.num_rows - i) + j; //offset shifted by + num_rows size_t diag = diag_map[map_index]; dst.values(i, diag) = src.values[jj]; } } } template <typename Matrix1, typename Matrix2> void csr_to_hyb(const Matrix1& src, Matrix2& dst, const size_t num_entries_per_row, const size_t alignment = 32) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; // The ELL portion of the HYB matrix will have 'num_entries_per_row' columns. // Nonzero values that do not fit within the ELL structure are placed in the // COO format portion of the HYB matrix. 
// compute number of nonzeros in the ELL and COO portions size_t num_ell_entries = 0; for(size_t i = 0; i < src.num_rows; i++) num_ell_entries += thrust::min<size_t>(num_entries_per_row, src.row_offsets[i+1] - src.row_offsets[i]); IndexType num_coo_entries = src.num_entries - num_ell_entries; dst.resize(src.num_rows, src.num_cols, num_ell_entries, num_coo_entries, num_entries_per_row, alignment); const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index; // pad out ELL format with zeros thrust::fill(dst.ell.column_indices.values.begin(), dst.ell.column_indices.values.end(), invalid_index); thrust::fill(dst.ell.values.values.begin(), dst.ell.values.values.end(), ValueType(0)); for(size_t i = 0, coo_nnz = 0; i < src.num_rows; i++) { size_t n = 0; IndexType jj = src.row_offsets[i]; // copy up to num_cols_per_row values of row i into the ELL while(jj < src.row_offsets[i+1] && n < num_entries_per_row) { dst.ell.column_indices(i,n) = src.column_indices[jj]; dst.ell.values(i,n) = src.values[jj]; jj++, n++; } // copy any remaining values in row i into the COO while(jj < src.row_offsets[i+1]) { dst.coo.row_indices[coo_nnz] = i; dst.coo.column_indices[coo_nnz] = src.column_indices[jj]; dst.coo.values[coo_nnz] = src.values[jj]; jj++; coo_nnz++; } } } template <typename Matrix1, typename Matrix2> void csr_to_ell(const Matrix1& src, Matrix2& dst, const size_t num_entries_per_row, const size_t alignment = 32) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; // compute number of nonzeros size_t num_entries = 0; #pragma omp parallel for reduction( +: num_entries) for(size_t i = 0; i < src.num_rows; i++) num_entries += thrust::min<size_t>(num_entries_per_row, src.row_offsets[i+1] - src.row_offsets[i]); dst.resize(src.num_rows, src.num_cols, num_entries, num_entries_per_row, alignment); const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index; // pad out ELL format with zeros thrust::fill(dst.column_indices.values.begin(), dst.column_indices.values.end(), invalid_index); thrust::fill(dst.values.values.begin(), dst.values.values.end(), ValueType(0)); #pragma omp parallel for for(size_t i = 0; i < src.num_rows; i++) { size_t n = 0; IndexType jj = src.row_offsets[i]; // copy up to num_cols_per_row values of row i into the ELL while(jj < src.row_offsets[i+1] && n < num_entries_per_row) { dst.column_indices(i,n) = src.column_indices[jj]; dst.values(i,n) = src.values[jj]; jj++, n++; } } } template <typename Matrix1, typename Matrix2> void csr_to_array2d(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; dst.resize(src.num_rows, src.num_cols); thrust::fill(dst.values.begin(), dst.values.end(), ValueType(0)); for(size_t i = 0; i < src.num_rows; i++) for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i+1]; jj++) dst(i, src.column_indices[jj]) += src.values[jj]; //sum duplicates } ///////////////////// // DIA Conversions // ///////////////////// template <typename Matrix1, typename Matrix2> void dia_to_cds(const Matrix1& src, Matrix2& dst) { //typedef typename Matrix2::index_type IndexType; //typedef typename Matrix2::value_type ValueType; // allocate CDS structure dst.resize(src.num_rows, src.num_entries, src.diagonal_offsets.size(), 1, src.values.pitch); cusp::copy(src.diagonal_offsets, dst.diagonal_offsets); cusp::copy(src.values, dst.values); } template <typename Matrix1, typename 
Matrix2> void dia_to_csr(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; size_t num_entries = 0; size_t num_diagonals = src.diagonal_offsets.size(); // count nonzero entries for(size_t i = 0; i < src.num_rows; i++) { for(size_t n = 0; n < num_diagonals; n++) { const IndexType j = i + src.diagonal_offsets[n]; if(j >= 0 && static_cast<size_t>(j) < src.num_cols && src.values(i,n) != ValueType(0)) num_entries++; } } dst.resize(src.num_rows, src.num_cols, num_entries); num_entries = 0; dst.row_offsets[0] = 0; // copy nonzero entries to CSR structure for(size_t i = 0; i < src.num_rows; i++) { for(size_t n = 0; n < num_diagonals; n++) { const IndexType j = i + src.diagonal_offsets[n]; if(j >= 0 && static_cast<size_t>(j) < src.num_cols) { const ValueType value = src.values(i, n); if (value != ValueType(0)) { dst.column_indices[num_entries] = j; dst.values[num_entries] = value; num_entries++; } } } dst.row_offsets[i + 1] = num_entries; } } ///////////////////// // ELL Conversions // ///////////////////// template <typename Matrix1, typename Matrix2> void ell_to_coo(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index; dst.resize(src.num_rows, src.num_cols, src.num_entries); size_t num_entries = 0; const size_t num_entries_per_row = src.column_indices.num_cols; for(size_t i = 0; i < src.num_rows; i++) { for(size_t n = 0; n < num_entries_per_row; n++) { const IndexType j = src.column_indices(i,n); const ValueType v = src.values(i,n); if(j != invalid_index) { dst.row_indices[num_entries] = i; dst.column_indices[num_entries] = j; dst.values[num_entries] = v; num_entries++; } } } } template <typename Matrix1, typename Matrix2> void ell_to_csr(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index; dst.resize(src.num_rows, src.num_cols, src.num_entries); size_t num_entries = 0; dst.row_offsets[0] = 0; const size_t num_entries_per_row = src.column_indices.num_cols; for(size_t i = 0; i < src.num_rows; i++) { for(size_t n = 0; n < num_entries_per_row; n++) { const IndexType j = src.column_indices(i,n); const ValueType v = src.values(i,n); if(j != invalid_index) { dst.column_indices[num_entries] = j; dst.values[num_entries] = v; num_entries++; } } dst.row_offsets[i + 1] = num_entries; } } ///////////////////// // HYB Conversions // ///////////////////// template <typename Matrix1, typename Matrix2> void hyb_to_coo(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; dst.resize(src.num_rows, src.num_cols, src.num_entries); const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index; const size_t num_entries_per_row = src.ell.column_indices.num_cols; size_t num_entries = 0; size_t coo_progress = 0; // merge each row of the ELL and COO parts into a single COO row for(size_t i = 0; i < src.num_rows; i++) { // append the i-th row from the ELL part for(size_t n = 0; n < num_entries_per_row; n++) { const IndexType j = src.ell.column_indices(i,n); const ValueType v = src.ell.values(i,n); if(j != invalid_index) { dst.row_indices[num_entries] = i; 
dst.column_indices[num_entries] = j; dst.values[num_entries] = v; num_entries++; } } // append the i-th row from the COO part while (coo_progress < src.coo.num_entries && static_cast<size_t>(src.coo.row_indices[coo_progress]) == i) { dst.row_indices[num_entries] = i; dst.column_indices[num_entries] = src.coo.column_indices[coo_progress]; dst.values[num_entries] = src.coo.values[coo_progress]; num_entries++; coo_progress++; } } } template <typename Matrix1, typename Matrix2> void hyb_to_csr(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; dst.resize(src.num_rows, src.num_cols, src.num_entries); const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index; const size_t num_entries_per_row = src.ell.column_indices.num_cols; size_t num_entries = 0; dst.row_offsets[0] = 0; size_t coo_progress = 0; // merge each row of the ELL and COO parts into a single CSR row for(size_t i = 0; i < src.num_rows; i++) { // append the i-th row from the ELL part for(size_t n = 0; n < num_entries_per_row; n++) { const IndexType j = src.ell.column_indices(i,n); const ValueType v = src.ell.values(i,n); if(j != invalid_index) { dst.column_indices[num_entries] = j; dst.values[num_entries] = v; num_entries++; } } // append the i-th row from the COO part while (coo_progress < src.coo.num_entries && static_cast<size_t>(src.coo.row_indices[coo_progress]) == i) { dst.column_indices[num_entries] = src.coo.column_indices[coo_progress]; dst.values[num_entries] = src.coo.values[coo_progress]; num_entries++; coo_progress++; } dst.row_offsets[i + 1] = num_entries; } } ///////////////////////// // Array1d Conversions // ///////////////////////// template <typename Matrix1, typename Matrix2> void array2d_to_array1d(const Matrix1& src, Matrix2& dst) { if (src.num_rows == 0 && src.num_cols == 0) { dst.resize(0); } else if (src.num_cols == 1) { dst.resize(src.num_rows); for (size_t i = 0; i < src.num_rows; i++) dst[i] = src(i,0); } else if (src.num_rows == 1) { dst.resize(src.num_cols); for (size_t j = 0; j < src.num_cols; j++) dst[j] = src(0,j); } else { throw cusp::format_conversion_exception("array2d to array1d conversion is only defined for row or column vectors"); } } ///////////////////////// // Array2d Conversions // ///////////////////////// template <typename Matrix1, typename Matrix2> void array1d_to_array2d(const Matrix1& src, Matrix2& dst) { dst.resize(src.size(),1); for (size_t i = 0; i < src.size(); i++) dst(i,0) = src[i]; } template <typename Matrix1, typename Matrix2> void array2d_to_coo(const Matrix1& src, Matrix2& dst) { //typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; // count number of nonzero entries in array size_t nnz = 0; for(size_t i = 0; i < src.num_rows; i++) { for(size_t j = 0; j < src.num_cols; j++) { if (src(i,j) != ValueType(0)) nnz++; } } dst.resize(src.num_rows, src.num_cols, nnz); nnz = 0; for(size_t i = 0; i < src.num_rows; i++) { for(size_t j = 0; j < src.num_cols; j++) { if (src(i,j) != ValueType(0)) { dst.row_indices[nnz] = i; dst.column_indices[nnz] = j; dst.values[nnz] = src(i,j); nnz++; } } } } template <typename Matrix1, typename Matrix2> void array2d_to_csr(const Matrix1& src, Matrix2& dst) { typedef typename Matrix2::index_type IndexType; typedef typename Matrix2::value_type ValueType; IndexType nnz = src.num_entries - thrust::count(src.values.begin(), src.values.end(), ValueType(0)); dst.resize(src.num_rows, 
src.num_cols, nnz); IndexType num_entries = 0; for(size_t i = 0; i < src.num_rows; i++) { dst.row_offsets[i] = num_entries; for(size_t j = 0; j < src.num_cols; j++) { if (src(i,j) != ValueType(0)) { dst.column_indices[num_entries] = j; dst.values[num_entries] = src(i,j); num_entries++; } } } dst.row_offsets[src.num_rows] = num_entries; } } // end namespace host } // end namespace detail } // end namespace cusp
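A standalone sketch of the same count / prefix-sum / scatter scheme used by coo_to_csr above, written without the cusp containers; the function name and the use of a working copy of the offsets are illustrative.

#include <vector>
#include <cstddef>

void coo_to_csr_sketch(std::size_t num_rows,
                       const std::vector<int>&    coo_rows,
                       const std::vector<int>&    coo_cols,
                       const std::vector<double>& coo_vals,
                       std::vector<int>&          row_offsets,   // size num_rows + 1
                       std::vector<int>&          col_indices,
                       std::vector<double>&       vals)
{
    const std::size_t nnz = coo_vals.size();
    row_offsets.assign(num_rows + 1, 0);
    col_indices.resize(nnz);
    vals.resize(nnz);

    // 1. count entries per row
    for (std::size_t n = 0; n < nnz; n++)
        row_offsets[coo_rows[n]]++;

    // 2. exclusive prefix sum turns counts into offsets
    int cumsum = 0;
    for (std::size_t i = 0; i <= num_rows; i++) {
        int tmp = row_offsets[i];
        row_offsets[i] = cumsum;
        cumsum += tmp;
    }

    // 3. scatter values, bumping each row's insertion point as it fills
    std::vector<int> next = row_offsets;   // working copy of insertion points
    for (std::size_t n = 0; n < nnz; n++) {
        int dst = next[coo_rows[n]]++;
        col_indices[dst] = coo_cols[n];
        vals[dst]        = coo_vals[n];
    }
    // duplicates, if any, are kept, matching coo_to_csr above
}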
test_sign_verify.c
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Sign a message and verify the signature */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <amcl/utils.h> #include <amcl/randapi.h> #include <amcl/bls_BLS381.h> #include <oqs/oqs.h> #include <pqnist/pqnist.h> #define NTHREADS 8 #define MAXSIZE 256 #define G2LEN 4*BFS_BLS381 #define SIGLEN BFS_BLS381+1 int main() { int i,rc; // Seed value for CSPRNG char seed[NTHREADS][PQNIST_SEED_LENGTH]; // Message to be sent to Bob char p[NTHREADS][MAXSIZE]; octet P[NTHREADS]; // BLS signature char s[NTHREADS][SIGLEN]; octet S[NTHREADS]; // Initialise seed for(i=0; i<NTHREADS; i++) { for(int j=0; j<PQNIST_SEED_LENGTH; j++) { seed[i][j] = i; } } // Generate SIKE and BLS keys // Bob's SIKE keys (not used) uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key]; uint8_t SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key]; // Alice's BLS keys char BLSsk[NTHREADS][BGS_BLS381]; char BLSpk[NTHREADS][G2LEN]; #pragma omp parallel for for(i=0; i<NTHREADS; i++) { rc = pqnist_keys(seed[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]); if (rc) { fprintf(stderr, "FAILURE pqnist_keys rc: %d\n", rc); exit(EXIT_FAILURE); } printf("BLS pklen %d pk: ", G2LEN); amcl_print_hex(BLSpk[i], G2LEN); printf("BLS sklen %d BLS sk: ", BGS_BLS381); amcl_print_hex(BLSsk[i], BGS_BLS381); printf("\n"); } // Alice for(i=0; i<NTHREADS; i++) { bzero(p[i],sizeof(p[i])); P[i].max = MAXSIZE; P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i); P[i].val = p[i]; printf("Alice Plaintext: "); OCT_output_string(&P[i]); printf("\n"); } for(i=0; i<NTHREADS; i++) { bzero(s[i],sizeof(s[i])); S[i].max = SIGLEN; S[i].len = SIGLEN; S[i].val = s[i]; } #pragma omp parallel for for(i=0; i<NTHREADS; i++) { // Alice signs message rc = pqnist_sign(P[i].val, BLSsk[i], S[i].val); if(rc) { fprintf(stderr, "FAILURE pqnist_sign rc: %d\n", rc); exit(EXIT_FAILURE); } printf("Alice SIGlen %d SIG", S[i].len); OCT_output(&S[i]); printf("\n"); } #pragma omp parallel for for(i=0; i<NTHREADS; i++) { // Bob verifies message rc = pqnist_verify(P[i].val, BLSpk[i], S[i].val); if (rc) { fprintf(stderr, "FAILURE pqnist_verify rc: %d\n", rc); exit(EXIT_FAILURE); } else { printf("Test %d pqnist_verify rc: %d\n", i, rc); OCT_output_string(&P[i]); printf("\n"); } } // clear memory for(i=0; i<NTHREADS; i++) { OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key); OCT_clear(&P[i]); OCT_clear(&S[i]); } printf("SUCCESS\n"); exit(EXIT_SUCCESS); }
ExemplarClusteringSubmodularFunction.h
#ifndef EXEMCL_FUNCTION_CPU #define EXEMCL_FUNCTION_CPU #include <src/function/SubmodularFunction.h> #include <utility> namespace exemcl::cpu { /** * This class provides a CPU implementation of the submodular function of exemplar-based clustering. */ template<typename HostDataType = float> class ExemplarClusteringSubmodularFunction : public SubmodularFunction { public: using SubmodularFunction::operator(); /** * Constructs the exemplar clustering submodular function using a ground set V. * * @param V The ground set V. */ explicit ExemplarClusteringSubmodularFunction(const MatrixX<HostDataType>& V, int workerCount = -1) : SubmodularFunction(workerCount), _V(std::make_unique<MatrixX<HostDataType>>(V)) { MatrixX<HostDataType> zeroVec = VectorX<HostDataType>::Zero(_V->cols()).transpose(); _zeroVecValue = L(zeroVec); }; /** * Evaluates the exemplar cluster-submodular function. * * @param S The set to evaluate. * @return The submodular function value. */ double operator()(const MatrixX<double>& S) override { return ((const ExemplarClusteringSubmodularFunction*) (this))->operator()(S); }; /** * Evaluates the exemplar cluster-submodular function. * * @param S The set to evaluate. * @return The submodular function value. */ double operator()(const MatrixX<double>& S) const override { auto S_copy = std::make_unique<MatrixX<HostDataType>>(S.cast<HostDataType>()); // Add zero vector to data copy. S_copy->conservativeResize(S_copy->rows() + 1, Eigen::NoChange_t()); S_copy->row(S_copy->rows() - 1).setZero(); // Make calculations. HostDataType L_2 = L(*S_copy); return _zeroVecValue - L_2; }; /** * Returns a reference to the ground set V. * @return As stated above. */ const MatrixX<HostDataType>& getV() const { return _V; }; private: HostDataType _zeroVecValue; const std::unique_ptr<MatrixX<HostDataType>> _V; /** * Calculates the L function. * * @param S_inner Set of data to calculate the L function for. * @return L function value. */ HostDataType L(const MatrixX<HostDataType>& S_inner) const { auto* accuArray = new HostDataType[_V->rows()]; for (unsigned int i = 0; i < _V->rows(); i++) { auto min_val = std::numeric_limits<HostDataType>::max(); for (unsigned int j = 0; j < S_inner.rows(); j++) min_val = std::min((_V->row(i) - S_inner.row(j)).squaredNorm(), min_val); accuArray[i] = min_val; } HostDataType accu = 0.0; #pragma omp simd reduction(+ : accu) for (unsigned int i = 0; i < _V->rows(); i++) accu += accuArray[i]; delete[] accuArray; return accu / static_cast<HostDataType>(_V->rows()); }; }; } #endif // EXEMCL_FUNCTION_CPU
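Restating what the class above computes, directly from L() and operator(): with ground set $V$ and $e_0$ the all-zeros row appended inside operator(),

$$ L(S) \;=\; \frac{1}{|V|} \sum_{v \in V} \min_{s \in S} \lVert v - s \rVert_2^2, \qquad f(S) \;=\; L(\{e_0\}) \;-\; L(S \cup \{e_0\}), $$

where $L(\{e_0\})$ is the value cached in _zeroVecValue by the constructor.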
relu_hcl_arm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: qtang@openailab.com */ #ifndef _RELU_KERNEL_ARM_H_ #define _RELU_KERNEL_ARM_H_ #include <arm_neon.h> #include "../../../../../include/tengine_ir.h" #include "../../../../op/relu_param.h" static int perf_relu_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope, int num_thread) { int w = input_tensor->dims[3]; int h = output_tensor->dims[2]; int channels = input_tensor->dims[1]; int size = h * w; int c_step = h * w; float* input_data = input_tensor->data; float* out_data = output_tensor->data; if (negative_slope == 0) { #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < channels; q++) { float* src = input_data + c_step * q; float* dst = out_data + c_step * q; #if __ARM_NEON int nn = size >> 2; int remain = size - (nn << 2); #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _zero = vdupq_n_f32(0.f); for (; nn > 0; nn--) { float32x4_t _p = vld1q_f32(src); _p = vmaxq_f32(_p, _zero); vst1q_f32(dst, _p); src += 4; dst += 4; } #endif for (; remain > 0; remain--) { if (src[0] < 0) dst[0] = 0; else dst[0] = src[0]; src++; dst++; } } } else { #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < channels; q++) { float* src = input_data + c_step * q; float* dst = out_data + c_step * q; #if __ARM_NEON int nn = size >> 2; int remain = size - (nn << 2); #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _zero = vdupq_n_f32(0.f); float32x4_t _slope = vdupq_n_f32(negative_slope); for (; nn > 0; nn--) { float32x4_t _p = vld1q_f32(src); uint32x4_t _lemask = vcleq_f32(_p, _zero); float32x4_t _ps = vmulq_f32(_p, _slope); _p = vbslq_f32(_lemask, _ps, _p); vst1q_f32(dst, _p); src += 4; dst += 4; } #endif for (; remain > 0; remain--) { if (src[0] < 0) dst[0] = src[0] * negative_slope; else dst[0] = src[0]; src++; dst++; } } } return 0; } #endif
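A plain scalar reference for what both vectorized branches above compute: standard ReLU when negative_slope is zero, leaky ReLU otherwise. relu_reference_fp32 is an illustrative name and not part of the Tengine source; it is only a sketch useful for checking the NEON paths.

static void relu_reference_fp32(const float* src, float* dst, int size,
                                float negative_slope)
{
    for (int i = 0; i < size; i++)
    {
        float x = src[i];
        if (x < 0.f)
            dst[i] = (negative_slope == 0.f) ? 0.f : x * negative_slope;
        else
            dst[i] = x;
    }
}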
par_relax.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Relaxation scheme * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "Common.h" #include "_hypre_lapack.h" #include "par_relax.h" /*-------------------------------------------------------------------------- * hypre_BoomerAMGRelax *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_type, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { HYPRE_Int relax_error = 0; /*--------------------------------------------------------------------------------------- * Switch statement to direct control based on relax_type: * relax_type = 0 -> Jacobi or CF-Jacobi * relax_type = 1 -> Gauss-Seidel <--- very slow, sequential * relax_type = 2 -> Gauss_Seidel: interior points in parallel, * boundary sequential * relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor * with outer relaxation parameters (forward solve) * relax_type = 4 -> hybrid: SOR-J mix off-processor, SOR on-processor * with outer relaxation parameters (backward solve) * relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node * relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor * with outer relaxation parameters * relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR [GPU-supported, CF supported with redundant computation] * relax_type = 8 -> hybrid L1 Symm. Gauss-Seidel * relax_type = 9 -> Direct solve, Gaussian elimination * relax_type = 10 -> On-processor direct forward solve for matrices with * triangular structure (indices need not be ordered * triangular) * relax_type = 11 -> Two Stage approximation to GS. Uses the strict lower * part of the diagonal matrix * relax_type = 12 -> Two Stage approximation to GS. 
Uses the strict lower * part of the diagonal matrix and a second iteration * for additional error approximation * relax_type = 13 -> hybrid L1 Gauss-Seidel forward solve * relax_type = 14 -> hybrid L1 Gauss-Seidel backward solve * relax_type = 15 -> CG * relax_type = 16 -> Scaled Chebyshev * relax_type = 17 -> FCF-Jacobi * relax_type = 18 -> L1-Jacobi [GPU-supported through call to relax7Jacobi] * relax_type = 19 -> Direct Solve, (old version) * relax_type = 20 -> Kaczmarz * relax_type = 29 -> Direct solve: use gaussian elimination & BLAS * (with pivoting) (old version) * relax_type = 98 -> Direct solve, Gaussian elimination * relax_type = 99 -> Direct solve, Gaussian elimination * relax_type = 199-> Direct solve, Gaussian elimination *-------------------------------------------------------------------------------------*/ switch (relax_type) { case 0: /* Weighted Jacobi */ hypre_BoomerAMGRelax0WeightedJacobi(A, f, cf_marker, relax_points, relax_weight, u, Vtemp); break; case 1: /* Gauss-Seidel VERY SLOW */ hypre_BoomerAMGRelax1GaussSeidel(A, f, cf_marker, relax_points, u); break; case 2: /* Gauss-Seidel: relax interior points in parallel, boundary sequentially */ hypre_BoomerAMGRelax2GaussSeidel(A, f, cf_marker, relax_points, u); break; /* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */ case 3: hypre_BoomerAMGRelax3HybridGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp); break; case 4: /* Hybrid: Jacobi off-processor, Gauss-Seidel/SOR on-processor (backward loop) */ hypre_BoomerAMGRelax4HybridGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp); break; case 5: /* Hybrid: Jacobi off-processor, chaotic Gauss-Seidel on-processor */ hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel(A, f, cf_marker, relax_points, u); break; case 6: /* Hybrid: Jacobi off-processor, Symm. Gauss-Seidel/SSOR on-processor with outer relaxation parameter */ hypre_BoomerAMGRelax6HybridSSOR(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp); break; case 7: /* Jacobi (uses ParMatvec) */ hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp); break; case 8: /* hybrid L1 Symm. Gauss-Seidel */ hypre_BoomerAMGRelax8HybridL1SSOR(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp); break; /* Hybrid: Jacobi off-processor, ordered Gauss-Seidel on-processor */ case 10: hypre_BoomerAMGRelax10TopoOrderedGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp); break; case 11: /* Two Stage Gauss Seidel. Forward sweep only */ hypre_BoomerAMGRelax11TwoStageGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp); break; case 12: /* Two Stage Gauss Seidel. 
Uses the diagonal matrix for the GS part */ hypre_BoomerAMGRelax12TwoStageGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp); break; case 13: /* hybrid L1 Gauss-Seidel forward solve */ hypre_BoomerAMGRelax13HybridL1GaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp); break; case 14: /* hybrid L1 Gauss-Seidel backward solve */ hypre_BoomerAMGRelax14HybridL1GaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp); break; case 18: /* weighted L1 Jacobi */ hypre_BoomerAMGRelax18WeightedL1Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp); break; case 19: /* Direct solve: use gaussian elimination */ relax_error = hypre_BoomerAMGRelax19GaussElim(A, f, u); break; case 20: /* Kaczmarz */ hypre_BoomerAMGRelaxKaczmarz(A, f, omega, l1_norms, u); break; case 98: /* Direct solve: use gaussian elimination & BLAS (with pivoting) */ relax_error = hypre_BoomerAMGRelax98GaussElimPivot(A, f, u); break; } return relax_error; } HYPRE_Int hypre_BoomerAMGRelaxWeightedJacobi_core( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, HYPRE_Int Skip_diag ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Complex *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Real one_minus_weight = 1.0 - relax_weight; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, index, num_sends, start; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { Vtemp_data[i] = u_data[i]; } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { const HYPRE_Complex di = l1_norms ? l1_norms[i] : A_diag_data[A_diag_i[i]]; /*----------------------------------------------------------- * If i is of the right type ( C or F or All ) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && di != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + Skip_diag; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } if (Skip_diag) { u_data[i] *= one_minus_weight; u_data[i] += relax_weight * res / di; } else { u_data[i] += relax_weight * res / di; } } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelax0WeightedJacobi( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, hypre_ParVector *u, hypre_ParVector *Vtemp ) { return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points, relax_weight, NULL, u, Vtemp, 1); } HYPRE_Int hypre_BoomerAMGRelax18WeightedL1Jacobi( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { // XXX GPU calls Relax7 XXX return hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp); } else #endif { /* in the case of non-CF, use relax-7 which is faster */ if (relax_points == 0) { return hypre_BoomerAMGRelax7Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp); } else { return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp, 0); } } } HYPRE_Int hypre_BoomerAMGRelax1GaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = 
hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs, vec_start, vec_len; hypre_MPI_Status *status; hypre_MPI_Request *requests; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST); requests = hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start; for (j = vec_start; j < vec_start+vec_len; j++) { v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr, requests, status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1) - vec_start; hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr, requests, status); } for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + 1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } if (num_procs > 1) { hypre_MPI_Barrier(comm); } } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelax2GaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs, vec_start, vec_len; hypre_MPI_Status *status; hypre_MPI_Request *requests; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST); requests = hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Relax interior points first *-----------------------------------------------------------------*/ for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F or All ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_offd_i[i+1] - A_offd_i[i] == zero && A_diag_data[A_diag_i[i]] != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + 1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start; for (j = vec_start; j < vec_start+vec_len; j++) { v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr, requests, status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1) - vec_start; hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr, requests, status); } for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F or All) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_offd_i[i+1] - A_offd_i[i] != zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } if (num_procs > 1) { hypre_MPI_Barrier(comm); } } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelaxHybridGaussSeidel_core( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp, HYPRE_Int GS_order, HYPRE_Int Symm, HYPRE_Int Skip_diag, HYPRE_Int forced_seq, HYPRE_Int Topo_order ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = 
hypre_VectorData(f_local); hypre_Vector *Vtemp_local = Vtemp ? hypre_ParVectorLocalVector(Vtemp) : NULL; HYPRE_Complex *Vtemp_data = Vtemp_local ? hypre_VectorData(Vtemp_local) : NULL; /* hypre_Vector *Ztemp_local = NULL; HYPRE_Complex *Ztemp_data = NULL; */ HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Int *proc_ordering = NULL; const HYPRE_Real one_minus_omega = 1.0 - omega; HYPRE_Int num_procs, my_id, num_threads, j, num_sends; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_threads = forced_seq ? 1 : hypre_NumThreads(); /* GS order: forward or backward */ const HYPRE_Int gs_order = GS_order > 0 ? 1 : -1; /* for symmetric GS, a forward followed by a backward */ const HYPRE_Int num_sweeps = Symm ? 2 : 1; /* if relax_weight and omega are both 1.0 */ const HYPRE_Int non_scale = relax_weight == 1.0 && omega == 1.0; /* */ const HYPRE_Real prod = 1.0 - relax_weight * omega; /* if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } */ #if defined(HYPRE_USING_PERSISTENT_COMM) // JSP: persistent comm can be similarly used for other smoothers hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (num_procs > 1) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); #if defined(HYPRE_USING_PERSISTENT_COMM) persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); v_ext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); #else v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); #endif HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { v_buf_data[j - begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif #if defined(HYPRE_USING_PERSISTENT_COMM) hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data); #else comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data); #endif #if defined(HYPRE_USING_PERSISTENT_COMM) hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, v_ext_data); #else hypre_ParCSRCommHandleDestroy(comm_handle); #endif comm_handle = NULL; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif } if (Topo_order) { /* Check for ordering of matrix. If stored, get pointer, otherwise * compute ordering and point matrix variable to array. 
* Used in AIR */ if (!hypre_ParCSRMatrixProcOrdering(A)) { proc_ordering = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); hypre_topo_sort(A_diag_i, A_diag_j, A_diag_data, proc_ordering, num_rows); hypre_ParCSRMatrixProcOrdering(A) = proc_ordering; } else { proc_ordering = hypre_ParCSRMatrixProcOrdering(A); } } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime(); #endif if ( (num_threads > 1 || !non_scale) && Vtemp_data ) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_rows; j++) { Vtemp_data[j] = u_data[j]; } } if (num_threads > 1) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { HYPRE_Int ns, ne, sweep; hypre_partition1D(num_rows, num_threads, j, &ns, &ne); for (sweep = 0; sweep < num_sweeps; sweep++) { const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1; const HYPRE_Int ibegin = iorder > 0 ? ns : ne - 1; const HYPRE_Int iend = iorder > 0 ? ne : ns - 1; if (non_scale) { hypre_HybridGaussSeidelNSThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data, ns, ne, ibegin, iend, iorder, Skip_diag); } else { hypre_HybridGaussSeidelThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega, prod, l1_norms, u_data, Vtemp_data, v_ext_data, ns, ne, ibegin, iend, iorder, Skip_diag); } } /* for (sweep = 0; sweep < num_sweeps; sweep++) */ } /* for (j = 0; j < num_threads; j++) */ } else /* if (num_threads > 1) */ { HYPRE_Int sweep; for (sweep = 0; sweep < num_sweeps; sweep++) { const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1; const HYPRE_Int ibegin = iorder > 0 ? 0 : num_rows - 1; const HYPRE_Int iend = iorder > 0 ? 
num_rows : -1; if (Topo_order) { hypre_HybridGaussSeidelOrderedNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, u_data, NULL, v_ext_data, ibegin, iend, iorder, proc_ordering); } else { if (non_scale) { hypre_HybridGaussSeidelNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data, ibegin, iend, iorder, Skip_diag); } else { hypre_HybridGaussSeidel(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data, f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega, prod, l1_norms, u_data, Vtemp_data, v_ext_data, ibegin, iend, iorder, Skip_diag); } } } /* for (sweep = 0; sweep < num_sweeps; sweep++) */ } /* if (num_threads > 1) */ #ifndef HYPRE_USING_PERSISTENT_COMM if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } /* forward hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax3HybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */, 1 /* skip diag */, 0, 0); } } /* backward hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax4HybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, -1 /* backward */, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, -1 /* backward */, 0 /* nosymm */, 1 /* skip diag */, 0, 0); } } /* chaotic forward G-S */ HYPRE_Int hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, 
hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *v_ext_data = NULL; HYPRE_Complex *v_buf_data = NULL; HYPRE_Complex zero = 0.0; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, ii, jj, index, num_sends, start; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F or All) and diagonal is * nonzero, relax point i; otherwise, skip it. * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------*/ if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero ) { res = f_data[i]; for (jj = A_diag_i[i] + 1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * v_ext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } if (num_procs > 1) { hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /* symmetric hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax6HybridSSOR( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1, 1 /* symm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1, 1 /* symm */, 1 /* skip diag */, 0, 0); } } HYPRE_Int hypre_BoomerAMGRelax7Jacobi( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp ) { HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_Vector l1_norms_vec; hypre_ParVector l1_norms_parvec; hypre_VectorData(&l1_norms_vec) = l1_norms; hypre_VectorSize(&l1_norms_vec) = num_rows; /* TODO XXX * The next line is NOT 100% correct, which should be the memory location of l1_norms instead of f * But how do I know it? As said, don't use raw pointers, don't use raw pointers! * It is fine normally since A, f, and l1_norms should live in the same memory space */ hypre_VectorMemoryLocation(&l1_norms_vec) = hypre_ParVectorMemoryLocation(f); hypre_ParVectorLocalVector(&l1_norms_parvec) = &l1_norms_vec; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif /*----------------------------------------------------------------- * Copy f into temporary vector. 
*-----------------------------------------------------------------*/ hypre_ParVectorCopy(f, Vtemp); /*----------------------------------------------------------------- * Perform Matvec Vtemp = w * (f - Au) *-----------------------------------------------------------------*/ hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, Vtemp); /*----------------------------------------------------------------- * u += D^{-1} * Vtemp, where D_ii = ||A(i,:)||_1 *-----------------------------------------------------------------*/ if (relax_points) { hypre_ParVectorElmdivpyMarked(Vtemp, &l1_norms_parvec, u, cf_marker, relax_points); } else { hypre_ParVectorElmdivpy(Vtemp, &l1_norms_parvec, u); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif return hypre_error_flag; } /* symmetric l1 hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax8HybridL1SSOR( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 0 : 1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, 1, 1 /* symm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, 1, 1 /* symm */, skip_diag, 0, 0); } } /* forward hybrid topology ordered G-S */ HYPRE_Int hypre_BoomerAMGRelax10TopoOrderedGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */, 1 /* skip_diag */, 1, 1); } /* forward l1 hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax13HybridL1GaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 
0 : 1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, 1, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, 1 /* forward */, 0 /* nonsymm */, skip_diag, 0, 0 ); } } /* backward l1 hybrid G-S */ HYPRE_Int hypre_BoomerAMGRelax14HybridL1GaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 0 : 1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_VectorMemoryLocation(f) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; // TODO implement CF relax on GPUs if (relax_points != 0) { exec = HYPRE_EXEC_HOST; } #if defined(HYPRE_USING_GPU) if (hypre_HandleDeviceGSMethod(hypre_handle()) == 0) { exec = HYPRE_EXEC_HOST; } #endif if (exec == HYPRE_EXEC_DEVICE) { return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, -1, 0 /* nonsymm */); } else #endif { return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp, -1 /* backward */, 0 /* nonsymm */, skip_diag, 0, 0 ); } } HYPRE_Int hypre_BoomerAMGRelax19GaussElim( hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u); HYPRE_Int n_global = (HYPRE_Int) global_num_rows; HYPRE_Int first_index = (HYPRE_Int) first_ind; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_CSRMatrix *A_CSR; HYPRE_Int *A_CSR_i; HYPRE_Int *A_CSR_j; HYPRE_Real *A_CSR_data; hypre_Vector *f_vector; HYPRE_Real *f_vector_data; HYPRE_Real *A_mat; HYPRE_Real *b_vec; HYPRE_Int i, jj, column, relax_error = 0; /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ /* all processors are needed for these routines */ A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); if (num_rows) { A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST); b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. 
*---------------------------------------------------------------*/ for (i = 0; i < n_global; i++) { for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++) { column = A_CSR_j[jj]; A_mat[i*n_global+column] = A_CSR_data[jj]; } b_vec[i] = f_vector_data[i]; } hypre_gselim(A_mat, b_vec, n_global, relax_error); for (i = 0; i < num_rows; i++) { u_data[i] = b_vec[first_index + i]; } hypre_TFree(A_mat, HYPRE_MEMORY_HOST); hypre_TFree(b_vec, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } else { hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } return relax_error; } HYPRE_Int hypre_BoomerAMGRelax98GaussElimPivot( hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u); HYPRE_Int n_global = (HYPRE_Int) global_num_rows; HYPRE_Int first_index = (HYPRE_Int) first_ind; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_CSRMatrix *A_CSR; HYPRE_Int *A_CSR_i; HYPRE_Int *A_CSR_j; HYPRE_Real *A_CSR_data; hypre_Vector *f_vector; HYPRE_Real *f_vector_data; HYPRE_Real *A_mat; HYPRE_Real *b_vec; HYPRE_Int i, jj, column, relax_error = 0; HYPRE_Int info; HYPRE_Int one_i = 1; HYPRE_Int *piv; /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ /* all processors are needed for these routines */ A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); if (num_rows) { A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST); b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. 
*---------------------------------------------------------------*/ for (i = 0; i < n_global; i++) { for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++) { /* need col major */ column = A_CSR_j[jj]; A_mat[i + n_global*column] = A_CSR_data[jj]; } b_vec[i] = f_vector_data[i]; } piv = hypre_CTAlloc(HYPRE_Int, n_global, HYPRE_MEMORY_HOST); /* write over A with LU */ hypre_dgetrf(&n_global, &n_global, A_mat, &n_global, piv, &info); /*now b_vec = inv(A)*b_vec */ hypre_dgetrs("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info); hypre_TFree(piv, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows; i++) { u_data[i] = b_vec[first_index+i]; } hypre_TFree(A_mat, HYPRE_MEMORY_HOST); hypre_TFree(b_vec, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } else { hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } return relax_error; } HYPRE_Int hypre_BoomerAMGRelaxKaczmarz( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Complex *f_data = hypre_VectorData(f_local); HYPRE_Complex *u_offd_data = NULL; HYPRE_Complex *u_buf_data = NULL; HYPRE_Complex res; HYPRE_Int num_procs, my_id, i, j, index, num_sends, start; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (num_procs > 1) { if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); u_buf_data = hypre_TAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, u_buf_data, u_offd_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST); } /* Forward local pass */ for (i = 0; i < num_rows; i++) { res = f_data[i]; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { res -= A_diag_data[j] * u_data[A_diag_j[j]]; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { res -= A_offd_data[j] * u_offd_data[A_offd_j[j]]; } res /= l1_norms[i]; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { u_data[A_diag_j[j]] += omega * res * A_diag_data[j]; } } /* Backward local pass */ for (i = num_rows - 1; i > -1; i--) { res = f_data[i]; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { res -= A_diag_data[j] * u_data[A_diag_j[j]]; } for (j = A_offd_i[i]; 
j < A_offd_i[i+1]; j++) { res -= A_offd_data[j] * u_offd_data[A_offd_j[j]]; } res /= l1_norms[i]; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { u_data[A_diag_j[j]] += omega * res * A_diag_data[j]; } } hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelaxTwoStageGaussSeidelHost( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, HYPRE_Int num_inner_iters) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Complex *Vtemp_data = hypre_VectorData(Vtemp_local); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Complex *u_data = hypre_VectorData(u_local); HYPRE_Int i, k, jj, ii; HYPRE_Complex multiplier = 1.0; /* Need to check that EVERY diagonal is nonzero first. If any are, throw exception */ for (i = 0; i < num_rows; i++) { if (A_diag_data[A_diag_i[i]] == 0.0) { hypre_error_in_arg(1); } } hypre_ParCSRMatrixMatvecOutOfPlace(-relax_weight, A, u, relax_weight, f, Vtemp); for (i = 0; i < num_rows; i++) /* Run the smoother */ { // V = V/D Vtemp_data[i] /= A_diag_data[A_diag_i[i]]; // u = u + m*v u_data[i] += multiplier * Vtemp_data[i]; } // adjust for the alternating series multiplier *= -1.0; for (k = 0; k < num_inner_iters; ++k) { // By going from bottom to top, we can update Vtemp in place because // we're operating with the strict, lower triangular matrix for (i = num_rows-1; i >=0; i--) /* Run the smoother */ { // spmv for the row first HYPRE_Complex res = 0.0; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii < i) { res += A_diag_data[jj] * Vtemp_data[ii]; } } // diagonal scaling has to come after the spmv accumulation. 
It's a row scaling // not column Vtemp_data[i] = res / A_diag_data[A_diag_i[i]]; u_data[i] += multiplier * Vtemp_data[i]; } // adjust for the alternating series multiplier *= -1.0; } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelax11TwoStageGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 1); } else #endif { hypre_BoomerAMGRelaxTwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp, 1); } return hypre_error_flag; } HYPRE_Int hypre_BoomerAMGRelax12TwoStageGaussSeidel( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) ); //RL: TODO back to hypre_GetExecPolicy2 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 2); } else #endif { hypre_BoomerAMGRelaxTwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp, 2); } return hypre_error_flag; }
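/* Illustrative sketch only (plain C, no hypre types; the helper name is made up):
 * one pass of the truncated-Neumann-series ("two-stage") Gauss-Seidel update that
 * hypre_BoomerAMGRelaxTwoStageGaussSeidelHost above performs with num_inner_iters = 1,
 * on a serial CSR matrix whose diagonal entry is stored first in each row.
 * It computes u += (I - D^{-1}L) D^{-1} w (f - A u), with L the strict lower triangle. */
static void two_stage_gs_sketch(int n, const int *ia, const int *ja,
                                const double *a, const double *f,
                                double *u, double *v, double w)
{
   int i, jj;
   /* stage 0: v = w * D^{-1} (f - A u); u += v  (a weighted Jacobi step) */
   for (i = 0; i < n; i++)
   {
      double r = f[i];
      for (jj = ia[i]; jj < ia[i+1]; jj++) { r -= a[jj] * u[ja[jj]]; }
      v[i] = w * r / a[ia[i]];
      u[i] += v[i];
   }
   /* stage 1: v := D^{-1} (L v); u -= v.  Sweeping bottom-up lets v be
    * overwritten in place, since row i only reads v[j] with j < i,
    * mirroring the in-place update in the host routine above. */
   for (i = n - 1; i >= 0; i--)
   {
      double r = 0.0;
      for (jj = ia[i]; jj < ia[i+1]; jj++)
      {
         if (ja[jj] < i) { r += a[jj] * v[ja[jj]]; }
      }
      v[i] = r / a[ia[i]];
      u[i] -= v[i];
   }
}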
ccv_bbf.c
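/* Illustrative sketch only (not part of the ccv API; the function name is made up):
 * the core test in _ccv_run_bbf_feature below asks whether the minimum over a
 * feature's "positive" pixels is strictly greater than the maximum over its
 * "negative" pixels, with an early exit as soon as the ordering fails.
 * On plain byte arrays the same decision rule reads: */
static int ccv_bbf_minmax_sketch(const unsigned char* pos, int np,
                                 const unsigned char* neg, int nn)
{
   int i;
   unsigned char pmin = pos[0], nmax = neg[0];
   for (i = 1; i < np; i++) if (pos[i] < pmin) pmin = pos[i];
   for (i = 1; i < nn; i++) if (neg[i] > nmax) nmax = neg[i];
   return pmin > nmax; /* 1 means the feature fires, as in the early-exit loop below */
}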
#include "ccv.h" #include "ccv_internal.h" #include <sys/time.h> #ifdef HAVE_GSL #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #endif #ifdef USE_OPENMP #include <omp.h> #endif const ccv_bbf_param_t ccv_bbf_default_params = { .interval = 5, .min_neighbors = 2, .accurate = 1, .flags = 0, .size = { 24, 24, }, }; #define _ccv_width_padding(x) (((x) + 3) & -4) static inline int _ccv_run_bbf_feature(ccv_bbf_feature_t* feature, int* step, unsigned char** u8) { #define pf_at(i) (*(u8[feature->pz[i]] + feature->px[i] + feature->py[i] * step[feature->pz[i]])) #define nf_at(i) (*(u8[feature->nz[i]] + feature->nx[i] + feature->ny[i] * step[feature->nz[i]])) unsigned char pmin = pf_at(0), nmax = nf_at(0); /* check if every point in P > every point in N, and take a shortcut */ if (pmin <= nmax) return 0; int i; for (i = 1; i < feature->size; i++) { if (feature->pz[i] >= 0) { int p = pf_at(i); if (p < pmin) { if (p <= nmax) return 0; pmin = p; } } if (feature->nz[i] >= 0) { int n = nf_at(i); if (n > nmax) { if (pmin <= n) return 0; nmax = n; } } } #undef pf_at #undef nf_at return 1; } static int _ccv_read_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier) { FILE* r = fopen(file, "r"); if (r == 0) return -1; int stat = 0; stat |= fscanf(r, "%d", &classifier->count); union { float fl; int i; } fli; stat |= fscanf(r, "%d", &fli.i); classifier->threshold = fli.fl; classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t)); classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float)); int i, j; for (i = 0; i < classifier->count; i++) { stat |= fscanf(r, "%d", &classifier->feature[i].size); for (j = 0; j < classifier->feature[i].size; j++) { stat |= fscanf(r, "%d %d %d", &classifier->feature[i].px[j], &classifier->feature[i].py[j], &classifier->feature[i].pz[j]); stat |= fscanf(r, "%d %d %d", &classifier->feature[i].nx[j], &classifier->feature[i].ny[j], &classifier->feature[i].nz[j]); } union { float fl; int i; } flia, flib; stat |= fscanf(r, "%d %d", &flia.i, &flib.i); classifier->alpha[i * 2] = flia.fl; classifier->alpha[i * 2 + 1] = flib.fl; } fclose(r); return 0; } #ifdef HAVE_GSL static unsigned int _ccv_bbf_time_measure() { struct timeval tv; gettimeofday(&tv, 0); return tv.tv_sec * 1000000 + tv.tv_usec; } #define less_than(a, b, aux) ((a) < (b)) CCV_IMPLEMENT_QSORT(_ccv_sort_32f, float, less_than) #undef less_than static void _ccv_bbf_eval_data(ccv_bbf_stage_classifier_t* classifier, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, float* peval, float* neval) { int i, j; int steps[] = { _ccv_width_padding(size.width), _ccv_width_padding(size.width >> 1), _ccv_width_padding(size.width >> 2) }; int isizs0 = steps[0] * size.height; int isizs01 = isizs0 + steps[1] * (size.height >> 1); for (i = 0; i < posnum; i++) { unsigned char* u8[] = { posdata[i], posdata[i] + isizs0, posdata[i] + isizs01 }; float sum = 0; float* alpha = classifier->alpha; ccv_bbf_feature_t* feature = classifier->feature; for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature) sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)]; peval[i] = sum; } for (i = 0; i < negnum; i++) { unsigned char* u8[] = { negdata[i], negdata[i] + isizs0, negdata[i] + isizs01 }; float sum = 0; float* alpha = classifier->alpha; ccv_bbf_feature_t* feature = classifier->feature; for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature) sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)]; neval[i] = sum; } } static 
int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size) { float* peval = (float*)ccmalloc(posnum * sizeof(float)); int i, j, k, rpos = posnum; for (i = 0; i < cascade->count; i++) { _ccv_bbf_eval_data(cascade->stage_classifier + i, posdata, rpos, 0, 0, size, peval, 0); k = 0; for (j = 0; j < rpos; j++) if (peval[j] >= cascade->stage_classifier[i].threshold) { posdata[k] = posdata[j]; ++k; } else { ccfree(posdata[j]); } rpos = k; } ccfree(peval); return rpos; } static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum) { int t, i, j, k, q; int negperbg; int negtotal = 0; int steps[] = { _ccv_width_padding(cascade->size.width), _ccv_width_padding(cascade->size.width >> 1), _ccv_width_padding(cascade->size.width >> 2) }; int isizs0 = steps[0] * cascade->size.height; int isizs1 = steps[1] * (cascade->size.height >> 1); int isizs2 = steps[2] * (cascade->size.height >> 2); int* idcheck = (int*)ccmalloc(negnum * sizeof(int)); gsl_rng_env_setup(); gsl_rng* rng = gsl_rng_alloc(gsl_rng_default); gsl_rng_set(rng, (unsigned long int)idcheck); ccv_size_t imgsz = cascade->size; int rneg = negtotal; for (t = 0; negtotal < negnum; t++) { printf("preparing negative data ... 0%%"); for (i = 0; i < bgnum; i++) { negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal; ccv_dense_matrix_t* image = 0; ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE); assert((image->type & CCV_C1) && (image->type & CCV_8U)); if (image == 0) { printf("\n%s file corrupted\n", bgfiles[i]); continue; } if (t % 2 != 0) ccv_flip(image, 0, 0, CCV_FLIP_X); if (t % 4 >= 2) ccv_flip(image, 0, 0, CCV_FLIP_Y); ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size }; ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params); memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int)); for (j = 0; j < ccv_min(detected->rnum, negperbg); j++) { int r = gsl_rng_uniform_int(rng, detected->rnum); int flag = 1; ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r); while (flag) { flag = 0; for (k = 0; k < j; k++) if (r == idcheck[k]) { flag = 1; r = gsl_rng_uniform_int(rng, detected->rnum); break; } rect = (ccv_rect_t*)ccv_array_get(detected, r); if ((rect->x < 0) || (rect->y < 0) || (rect->width + rect->x >= image->cols) || (rect->height + rect->y >= image->rows)) { flag = 1; r = gsl_rng_uniform_int(rng, detected->rnum); } } idcheck[j] = r; ccv_dense_matrix_t* temp = 0; ccv_dense_matrix_t* imgs0 = 0; ccv_dense_matrix_t* imgs1 = 0; ccv_dense_matrix_t* imgs2 = 0; ccv_slice(image, (ccv_matrix_t**)&temp, 0, rect->y, rect->x, rect->height, rect->width); ccv_resample(temp, &imgs0, 0, imgsz.height, imgsz.width, CCV_INTER_AREA); assert(imgs0->step == steps[0]); ccv_matrix_free(temp); ccv_sample_down(imgs0, &imgs1, 0, 0, 0); assert(imgs1->step == steps[1]); ccv_sample_down(imgs1, &imgs2, 0, 0, 0); assert(imgs2->step == steps[2]); negdata[negtotal] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2); unsigned char* u8s0 = negdata[negtotal]; unsigned char* u8s1 = negdata[negtotal] + isizs0; unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1; unsigned char* u8[] = { u8s0, u8s1, u8s2 }; memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step); ccv_matrix_free(imgs0); memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step); ccv_matrix_free(imgs1); memcpy(u8s2, imgs2->data.u8, imgs2->rows 
* imgs2->step); ccv_matrix_free(imgs2); flag = 1; ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier; for (k = 0; k < cascade->count; ++k, ++classifier) { float sum = 0; float* alpha = classifier->alpha; ccv_bbf_feature_t* feature = classifier->feature; for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature) sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)]; if (sum < classifier->threshold) { flag = 0; break; } } if (!flag) ccfree(negdata[negtotal]); else { ++negtotal; if (negtotal >= negnum) break; } } ccv_array_free(detected); ccv_matrix_free(image); ccv_drain_cache(); printf("\rpreparing negative data ... %2d%%", 100 * negtotal / negnum); fflush(0); if (negtotal >= negnum) break; } if (rneg == negtotal) break; rneg = negtotal; printf("\nentering additional round %d\n", t + 1); } gsl_rng_free(rng); ccfree(idcheck); ccv_drain_cache(); printf("\n"); return negtotal; } static void _ccv_prepare_positive_data(ccv_dense_matrix_t** posimg, unsigned char** posdata, ccv_size_t size, int posnum) { printf("preparing positive data ... 0%%"); int i; for (i = 0; i < posnum; i++) { ccv_dense_matrix_t* imgs0 = posimg[i]; ccv_dense_matrix_t* imgs1 = 0; ccv_dense_matrix_t* imgs2 = 0; assert((imgs0->type & CCV_C1) && (imgs0->type & CCV_8U) && imgs0->rows == size.height && imgs0->cols == size.width); ccv_sample_down(imgs0, &imgs1, 0, 0, 0); ccv_sample_down(imgs1, &imgs2, 0, 0, 0); int isizs0 = imgs0->rows * imgs0->step; int isizs1 = imgs1->rows * imgs1->step; int isizs2 = imgs2->rows * imgs2->step; posdata[i] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2); memcpy(posdata[i], imgs0->data.u8, isizs0); memcpy(posdata[i] + isizs0, imgs1->data.u8, isizs1); memcpy(posdata[i] + isizs0 + isizs1, imgs2->data.u8, isizs2); printf("\rpreparing positive data ... 
%2d%%", 100 * (i + 1) / posnum); fflush(0); ccv_matrix_free(imgs1); ccv_matrix_free(imgs2); } ccv_drain_cache(); printf("\n"); } typedef struct { double fitness; int pk, nk; int age; double error; ccv_bbf_feature_t feature; } ccv_bbf_gene_t; static inline void _ccv_bbf_genetic_fitness(ccv_bbf_gene_t* gene) { gene->fitness = (1 - gene->error) * exp(-0.01 * gene->age) * exp((gene->pk + gene->nk) * log(1.015)); } static inline int _ccv_bbf_exist_gene_feature(ccv_bbf_gene_t* gene, int x, int y, int z) { int i; for (i = 0; i < gene->pk; i++) if (z == gene->feature.pz[i] && x == gene->feature.px[i] && y == gene->feature.py[i]) return 1; for (i = 0; i < gene->nk; i++) if (z == gene->feature.nz[i] && x == gene->feature.nx[i] && y == gene->feature.ny[i]) return 1; return 0; } static inline void _ccv_bbf_randomize_gene(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols) { int i; do { gene->pk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1; gene->nk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1; } while (gene->pk + gene->nk < CCV_BBF_POINT_MIN); /* a hard restriction of at least 3 points have to be examed */ gene->feature.size = ccv_max(gene->pk, gene->nk); gene->age = 0; for (i = 0; i < CCV_BBF_POINT_MAX; i++) { gene->feature.pz[i] = -1; gene->feature.nz[i] = -1; } int x, y, z; for (i = 0; i < gene->pk; i++) { do { z = gsl_rng_uniform_int(rng, 3); x = gsl_rng_uniform_int(rng, cols[z]); y = gsl_rng_uniform_int(rng, rows[z]); } while (_ccv_bbf_exist_gene_feature(gene, x, y, z)); gene->feature.pz[i] = z; gene->feature.px[i] = x; gene->feature.py[i] = y; } for (i = 0; i < gene->nk; i++) { do { z = gsl_rng_uniform_int(rng, 3); x = gsl_rng_uniform_int(rng, cols[z]); y = gsl_rng_uniform_int(rng, rows[z]); } while ( _ccv_bbf_exist_gene_feature(gene, x, y, z)); gene->feature.nz[i] = z; gene->feature.nx[i] = x; gene->feature.ny[i] = y; } } static inline double _ccv_bbf_error_rate(ccv_bbf_feature_t* feature, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw) { int i; int steps[] = { _ccv_width_padding(size.width), _ccv_width_padding(size.width >> 1), _ccv_width_padding(size.width >> 2) }; int isizs0 = steps[0] * size.height; int isizs01 = isizs0 + steps[1] * (size.height >> 1); double error = 0; for (i = 0; i < posnum; i++) { unsigned char* u8[] = { posdata[i], posdata[i] + isizs0, posdata[i] + isizs01 }; if (!_ccv_run_bbf_feature(feature, steps, u8)) error += pw[i]; } for (i = 0; i < negnum; i++) { unsigned char* u8[] = { negdata[i], negdata[i] + isizs0, negdata[i] + isizs01 }; if ( _ccv_run_bbf_feature(feature, steps, u8)) error += nw[i]; } return error; } #define less_than(fit1, fit2, aux) ((fit1).fitness >= (fit2).fitness) static CCV_IMPLEMENT_QSORT(_ccv_bbf_genetic_qsort, ccv_bbf_gene_t, less_than) #undef less_than static ccv_bbf_feature_t _ccv_bbf_genetic_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, int ftnum, ccv_size_t size, double* pw, double* nw) { ccv_bbf_feature_t best; /* seed (random method) */ gsl_rng_env_setup(); gsl_rng* rng = gsl_rng_alloc(gsl_rng_default); union { unsigned long int li; double db; } dbli; dbli.db = pw[0] + nw[0]; gsl_rng_set(rng, dbli.li); int i, j; int pnum = ftnum * 100; assert(pnum > 0); ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc(pnum * sizeof(ccv_bbf_gene_t)); int rows[] = { size.height, size.height >> 1, size.height >> 2 }; int cols[] = { size.width, size.width >> 1, size.width >> 2 }; for (i = 0; i < pnum; i++) _ccv_bbf_randomize_gene(rng, 
&gene[i], rows, cols); unsigned int timer = _ccv_bbf_time_measure(); #ifdef USE_OPENMP #pragma omp parallel for private(i) schedule(dynamic) #endif for (i = 0; i < pnum; i++) gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw); timer = _ccv_bbf_time_measure() - timer; for (i = 0; i < pnum; i++) _ccv_bbf_genetic_fitness(&gene[i]); double best_err = 1; int rnum = ftnum * 39; /* number of randomize */ int mnum = ftnum * 40; /* number of mutation */ int hnum = ftnum * 20; /* number of hybrid */ /* iteration stop crit : best no change in 40 iterations */ int it = 0, t; for (t = 0 ; it < 40; ++it, ++t) { int min_id = 0; double min_err = gene[0].error; for (i = 1; i < pnum; i++) if (gene[i].error < min_err) { min_id = i; min_err = gene[i].error; } min_err = gene[min_id].error = _ccv_bbf_error_rate(&gene[min_id].feature, posdata, posnum, negdata, negnum, size, pw, nw); if (min_err < best_err) { best_err = min_err; memcpy(&best, &gene[min_id].feature, sizeof(best)); printf("best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, best.size); for (i = 0; i < best.size; i++) printf("(%d %d %d), ", best.px[i], best.py[i], best.pz[i]); printf("\n|-negative point: "); for (i = 0; i < best.size; i++) printf("(%d %d %d), ", best.nx[i], best.ny[i], best.nz[i]); printf("\n"); it = 0; } printf("minimum error achieved in round %d(%d) : %f with %d ms\n", t, it, min_err, timer / 1000); _ccv_bbf_genetic_qsort(gene, pnum, 0); for (i = 0; i < ftnum; i++) ++gene[i].age; for (i = ftnum; i < ftnum + mnum; i++) { int parent = gsl_rng_uniform_int(rng, ftnum); memcpy(gene + i, gene + parent, sizeof(ccv_bbf_gene_t)); /* three mutation strategy : 1. add, 2. remove, 3. refine */ int pnm, pn = gsl_rng_uniform_int(rng, 2); int* pnk[] = { &gene[i].pk, &gene[i].nk }; int* pnx[] = { gene[i].feature.px, gene[i].feature.nx }; int* pny[] = { gene[i].feature.py, gene[i].feature.ny }; int* pnz[] = { gene[i].feature.pz, gene[i].feature.nz }; int x, y, z; int victim, decay = 1; do { switch (gsl_rng_uniform_int(rng, 3)) { case 0: /* add */ if (gene[i].pk == CCV_BBF_POINT_MAX && gene[i].nk == CCV_BBF_POINT_MAX) break; while (*pnk[pn] + 1 > CCV_BBF_POINT_MAX) pn = gsl_rng_uniform_int(rng, 2); do { z = gsl_rng_uniform_int(rng, 3); x = gsl_rng_uniform_int(rng, cols[z]); y = gsl_rng_uniform_int(rng, rows[z]); } while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z)); pnz[pn][*pnk[pn]] = z; pnx[pn][*pnk[pn]] = x; pny[pn][*pnk[pn]] = y; ++(*pnk[pn]); gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk); decay = gene[i].age = 0; break; case 1: /* remove */ if (gene[i].pk + gene[i].nk <= CCV_BBF_POINT_MIN) /* at least 3 points have to be examed */ break; while (*pnk[pn] - 1 <= 0) // || *pnk[pn] + *pnk[!pn] - 1 < CCV_BBF_POINT_MIN) pn = gsl_rng_uniform_int(rng, 2); victim = gsl_rng_uniform_int(rng, *pnk[pn]); for (j = victim; j < *pnk[pn] - 1; j++) { pnz[pn][j] = pnz[pn][j + 1]; pnx[pn][j] = pnx[pn][j + 1]; pny[pn][j] = pny[pn][j + 1]; } pnz[pn][*pnk[pn] - 1] = -1; --(*pnk[pn]); gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk); decay = gene[i].age = 0; break; case 2: /* refine */ pnm = gsl_rng_uniform_int(rng, *pnk[pn]); do { z = gsl_rng_uniform_int(rng, 3); x = gsl_rng_uniform_int(rng, cols[z]); y = gsl_rng_uniform_int(rng, rows[z]); } while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z)); pnz[pn][pnm] = z; pnx[pn][pnm] = x; pny[pn][pnm] = y; decay = gene[i].age = 0; break; } } while (decay); } for (i = ftnum + mnum; i < ftnum + mnum + hnum; i++) { /* hybrid strategy: 
taking positive points from dad, negative points from mum */ int dad, mum; do { dad = gsl_rng_uniform_int(rng, ftnum); mum = gsl_rng_uniform_int(rng, ftnum); } while (dad == mum || gene[dad].pk + gene[mum].nk < CCV_BBF_POINT_MIN); /* at least 3 points have to be examed */ for (j = 0; j < CCV_BBF_POINT_MAX; j++) { gene[i].feature.pz[j] = -1; gene[i].feature.nz[j] = -1; } gene[i].pk = gene[dad].pk; for (j = 0; j < gene[i].pk; j++) { gene[i].feature.pz[j] = gene[dad].feature.pz[j]; gene[i].feature.px[j] = gene[dad].feature.px[j]; gene[i].feature.py[j] = gene[dad].feature.py[j]; } gene[i].nk = gene[mum].nk; for (j = 0; j < gene[i].nk; j++) { gene[i].feature.nz[j] = gene[mum].feature.nz[j]; gene[i].feature.nx[j] = gene[mum].feature.nx[j]; gene[i].feature.ny[j] = gene[mum].feature.ny[j]; } gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk); gene[i].age = 0; } for (i = ftnum + mnum + hnum; i < ftnum + mnum + hnum + rnum; i++) _ccv_bbf_randomize_gene(rng, &gene[i], rows, cols); timer = _ccv_bbf_time_measure(); #ifdef USE_OPENMP #pragma omp parallel for private(i) schedule(dynamic) #endif for (i = 0; i < pnum; i++) gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw); timer = _ccv_bbf_time_measure() - timer; for (i = 0; i < pnum; i++) _ccv_bbf_genetic_fitness(&gene[i]); } ccfree(gene); gsl_rng_free(rng); return best; } #define less_than(fit1, fit2, aux) ((fit1).error < (fit2).error) static CCV_IMPLEMENT_QSORT(_ccv_bbf_best_qsort, ccv_bbf_gene_t, less_than) #undef less_than static ccv_bbf_gene_t _ccv_bbf_best_gene(ccv_bbf_gene_t* gene, int pnum, int point_min, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw) { int i; unsigned int timer = _ccv_bbf_time_measure(); #ifdef USE_OPENMP #pragma omp parallel for private(i) schedule(dynamic) #endif for (i = 0; i < pnum; i++) gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw); timer = _ccv_bbf_time_measure() - timer; _ccv_bbf_best_qsort(gene, pnum, 0); int min_id = 0; double min_err = gene[0].error; for (i = 0; i < pnum; i++) if (gene[i].nk + gene[i].pk >= point_min) { min_id = i; min_err = gene[i].error; break; } printf("local best bbf feature with error %f\n|-size: %d\n|-positive point: ", min_err, gene[min_id].feature.size); for (i = 0; i < gene[min_id].feature.size; i++) printf("(%d %d %d), ", gene[min_id].feature.px[i], gene[min_id].feature.py[i], gene[min_id].feature.pz[i]); printf("\n|-negative point: "); for (i = 0; i < gene[min_id].feature.size; i++) printf("(%d %d %d), ", gene[min_id].feature.nx[i], gene[min_id].feature.ny[i], gene[min_id].feature.nz[i]); printf("\nthe computation takes %d ms\n", timer / 1000); return gene[min_id]; } static ccv_bbf_feature_t _ccv_bbf_convex_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_bbf_feature_t* best_feature, ccv_size_t size, double* pw, double* nw) { ccv_bbf_gene_t best_gene; /* seed (random method) */ gsl_rng_env_setup(); gsl_rng* rng = gsl_rng_alloc(gsl_rng_default); union { unsigned long int li; double db; } dbli; dbli.db = pw[0] + nw[0]; gsl_rng_set(rng, dbli.li); int i, j, k, q, p, g, t; int rows[] = { size.height, size.height >> 1, size.height >> 2 }; int cols[] = { size.width, size.width >> 1, size.width >> 2 }; int pnum = rows[0] * cols[0] + rows[1] * cols[1] + rows[2] * cols[2]; ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc((pnum * (CCV_BBF_POINT_MAX * 2 + 1) * 2 + CCV_BBF_POINT_MAX * 2 + 1) 
* sizeof(ccv_bbf_gene_t)); if (best_feature == 0) { /* bootstrapping the best feature, start from two pixels, one for positive, one for negative * the bootstrapping process go like this: first, it will assign a random pixel as positive * and enumerate every possible pixel as negative, and pick the best one. Then, enumerate every * possible pixel as positive, and pick the best one, until it converges */ memset(&best_gene, 0, sizeof(ccv_bbf_gene_t)); for (i = 0; i < CCV_BBF_POINT_MAX; i++) best_gene.feature.pz[i] = best_gene.feature.nz[i] = -1; best_gene.pk = 1; best_gene.nk = 0; best_gene.feature.size = 1; best_gene.feature.pz[0] = gsl_rng_uniform_int(rng, 3); best_gene.feature.px[0] = gsl_rng_uniform_int(rng, cols[best_gene.feature.pz[0]]); best_gene.feature.py[0] = gsl_rng_uniform_int(rng, rows[best_gene.feature.pz[0]]); for (t = 0; ; ++t) { g = 0; if (t % 2 == 0) { for (i = 0; i < 3; i++) for (j = 0; j < cols[i]; j++) for (k = 0; k < rows[i]; k++) if (i != best_gene.feature.pz[0] || j != best_gene.feature.px[0] || k != best_gene.feature.py[0]) { gene[g] = best_gene; gene[g].pk = gene[g].nk = 1; gene[g].feature.nz[0] = i; gene[g].feature.nx[0] = j; gene[g].feature.ny[0] = k; g++; } } else { for (i = 0; i < 3; i++) for (j = 0; j < cols[i]; j++) for (k = 0; k < rows[i]; k++) if (i != best_gene.feature.nz[0] || j != best_gene.feature.nx[0] || k != best_gene.feature.ny[0]) { gene[g] = best_gene; gene[g].pk = gene[g].nk = 1; gene[g].feature.pz[0] = i; gene[g].feature.px[0] = j; gene[g].feature.py[0] = k; g++; } } printf("bootstrapping round : %d\n", t); ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, 2, posdata, posnum, negdata, negnum, size, pw, nw); if (local_gene.error >= best_gene.error - 1e-10) break; best_gene = local_gene; } } else { best_gene.feature = *best_feature; best_gene.pk = best_gene.nk = best_gene.feature.size; for (i = 0; i < CCV_BBF_POINT_MAX; i++) if (best_feature->pz[i] == -1) { best_gene.pk = i; break; } for (i = 0; i < CCV_BBF_POINT_MAX; i++) if (best_feature->nz[i] == -1) { best_gene.nk = i; break; } } /* after bootstrapping, the float search technique will do the following permutations: * a). add a new point to positive or negative * b). remove a point from positive or negative * c). move an existing point in positive or negative to another position * the three rules applied exhaustively, no heuristic used. 
*/ for (t = 0; ; ++t) { g = 0; for (i = 0; i < 3; i++) for (j = 0; j < cols[i]; j++) for (k = 0; k < rows[i]; k++) if (!_ccv_bbf_exist_gene_feature(&best_gene, j, k, i)) { /* add positive point */ if (best_gene.pk < CCV_BBF_POINT_MAX - 1) { gene[g] = best_gene; gene[g].feature.pz[gene[g].pk] = i; gene[g].feature.px[gene[g].pk] = j; gene[g].feature.py[gene[g].pk] = k; gene[g].pk++; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } /* add negative point */ if (best_gene.nk < CCV_BBF_POINT_MAX - 1) { gene[g] = best_gene; gene[g].feature.nz[gene[g].nk] = i; gene[g].feature.nx[gene[g].nk] = j; gene[g].feature.ny[gene[g].nk] = k; gene[g].nk++; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } /* refine positive point */ for (q = 0; q < best_gene.pk; q++) { gene[g] = best_gene; gene[g].feature.pz[q] = i; gene[g].feature.px[q] = j; gene[g].feature.py[q] = k; g++; } /* add positive point, remove negative point */ if (best_gene.pk < CCV_BBF_POINT_MAX - 1 && best_gene.nk > 1) { for (q = 0; q < best_gene.nk; q++) { gene[g] = best_gene; gene[g].feature.pz[gene[g].pk] = i; gene[g].feature.px[gene[g].pk] = j; gene[g].feature.py[gene[g].pk] = k; gene[g].pk++; for (p = q; p < best_gene.nk - 1; p++) { gene[g].feature.nz[p] = gene[g].feature.nz[p + 1]; gene[g].feature.nx[p] = gene[g].feature.nx[p + 1]; gene[g].feature.ny[p] = gene[g].feature.ny[p + 1]; } gene[g].feature.nz[gene[g].nk - 1] = -1; gene[g].nk--; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } } /* refine negative point */ for (q = 0; q < best_gene.nk; q++) { gene[g] = best_gene; gene[g].feature.nz[q] = i; gene[g].feature.nx[q] = j; gene[g].feature.ny[q] = k; g++; } /* add negative point, remove positive point */ if (best_gene.pk > 1 && best_gene.nk < CCV_BBF_POINT_MAX - 1) { for (q = 0; q < best_gene.pk; q++) { gene[g] = best_gene; gene[g].feature.nz[gene[g].nk] = i; gene[g].feature.nx[gene[g].nk] = j; gene[g].feature.ny[gene[g].nk] = k; gene[g].nk++; for (p = q; p < best_gene.pk - 1; p++) { gene[g].feature.pz[p] = gene[g].feature.pz[p + 1]; gene[g].feature.px[p] = gene[g].feature.px[p + 1]; gene[g].feature.py[p] = gene[g].feature.py[p + 1]; } gene[g].feature.pz[gene[g].pk - 1] = -1; gene[g].pk--; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } } } if (best_gene.pk > 1) for (q = 0; q < best_gene.pk; q++) { gene[g] = best_gene; for (i = q; i < best_gene.pk - 1; i++) { gene[g].feature.pz[i] = gene[g].feature.pz[i + 1]; gene[g].feature.px[i] = gene[g].feature.px[i + 1]; gene[g].feature.py[i] = gene[g].feature.py[i + 1]; } gene[g].feature.pz[gene[g].pk - 1] = -1; gene[g].pk--; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } if (best_gene.nk > 1) for (q = 0; q < best_gene.nk; q++) { gene[g] = best_gene; for (i = q; i < best_gene.nk - 1; i++) { gene[g].feature.nz[i] = gene[g].feature.nz[i + 1]; gene[g].feature.nx[i] = gene[g].feature.nx[i + 1]; gene[g].feature.ny[i] = gene[g].feature.ny[i + 1]; } gene[g].feature.nz[gene[g].nk - 1] = -1; gene[g].nk--; gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk); g++; } gene[g] = best_gene; g++; printf("float search round : %d\n", t); ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, CCV_BBF_POINT_MIN, posdata, posnum, negdata, negnum, size, pw, nw); if (local_gene.error >= best_gene.error - 1e-10) break; best_gene = local_gene; } ccfree(gene); gsl_rng_free(rng); return best_gene.feature; } static int _ccv_write_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier) { FILE* w = fopen(file, "wb"); if (w == 0) 
return -1; fprintf(w, "%d\n", classifier->count); union { float fl; int i; } fli; fli.fl = classifier->threshold; fprintf(w, "%d\n", fli.i); int i, j; for (i = 0; i < classifier->count; i++) { fprintf(w, "%d\n", classifier->feature[i].size); for (j = 0; j < classifier->feature[i].size; j++) { fprintf(w, "%d %d %d\n", classifier->feature[i].px[j], classifier->feature[i].py[j], classifier->feature[i].pz[j]); fprintf(w, "%d %d %d\n", classifier->feature[i].nx[j], classifier->feature[i].ny[j], classifier->feature[i].nz[j]); } union { float fl; int i; } flia, flib; flia.fl = classifier->alpha[i * 2]; flib.fl = classifier->alpha[i * 2 + 1]; fprintf(w, "%d %d\n", flia.i, flib.i); } fclose(w); return 0; } static int _ccv_read_background_data(const char* file, unsigned char** negdata, int* negnum, ccv_size_t size) { int stat = 0; FILE* r = fopen(file, "rb"); if (r == 0) return -1; stat |= fread(negnum, sizeof(int), 1, r); int i; int isizs012 = _ccv_width_padding(size.width) * size.height + _ccv_width_padding(size.width >> 1) * (size.height >> 1) + _ccv_width_padding(size.width >> 2) * (size.height >> 2); for (i = 0; i < *negnum; i++) { negdata[i] = (unsigned char*)ccmalloc(isizs012); stat |= fread(negdata[i], 1, isizs012, r); } fclose(r); return 0; } static int _ccv_write_background_data(const char* file, unsigned char** negdata, int negnum, ccv_size_t size) { FILE* w = fopen(file, "w"); if (w == 0) return -1; fwrite(&negnum, sizeof(int), 1, w); int i; int isizs012 = _ccv_width_padding(size.width) * size.height + _ccv_width_padding(size.width >> 1) * (size.height >> 1) + _ccv_width_padding(size.width >> 2) * (size.height >> 2); for (i = 0; i < negnum; i++) fwrite(negdata[i], 1, isizs012, w); fclose(w); return 0; } static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum) { int stat = 0; FILE* r = fopen(file, "r"); if (r == 0) return -1; stat |= fscanf(r, "%d %d %d", i, k, bg); int j; union { double db; int i[2]; } dbi; for (j = 0; j < posnum; j++) { stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]); pw[j] = dbi.db; } for (j = 0; j < negnum; j++) { stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]); nw[j] = dbi.db; } fclose(r); return 0; } static int _ccv_save_bbf_cacade_training_state(const char* file, int i, int k, int bg, double* pw, double* nw, int posnum, int negnum) { FILE* w = fopen(file, "w"); if (w == 0) return -1; fprintf(w, "%d %d %d\n", i, k, bg); int j; union { double db; int i[2]; } dbi; for (j = 0; j < posnum; ++j) { dbi.db = pw[j]; fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]); } fprintf(w, "\n"); for (j = 0; j < negnum; ++j) { dbi.db = nw[j]; fprintf(w, "%d %d ", dbi.i[0], dbi.i[1]); } fprintf(w, "\n"); fclose(w); return 0; } void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params) { int i, j, k; /* allocate memory for usage */ ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t)); cascade->count = 0; cascade->size = size; cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t)); unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*)); unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*)); double* pw = (double*)ccmalloc(posnum * sizeof(double)); double* nw = (double*)ccmalloc(negnum * sizeof(double)); float* peval = 
(float*)ccmalloc(posnum * sizeof(float));
	float* neval = (float*)ccmalloc(negnum * sizeof(float));
	double inv_balance_k = 1. / params.balance_k;
	/* balance factor k, and weighted with 0.01 */
	params.balance_k *= 0.01;
	inv_balance_k *= 0.01;
	int steps[] = { _ccv_width_padding(cascade->size.width),
					_ccv_width_padding(cascade->size.width >> 1),
					_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1);
	i = 0;
	k = 0;
	int bg = 0;
	int cacheK = 10;
	/* state resume code */
	char buf[1024];
	sprintf(buf, "%s/stat.txt", dir);
	_ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum);
	if (i > 0)
	{
		cascade->count = i;
		ccfree(cascade->stage_classifier);
		cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t));
		for (j = 0; j < i; j++)
		{
			sprintf(buf, "%s/stage-%d.txt", dir, j);
			_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]);
		}
	}
	if (k > 0)
		cacheK = k;
	int rpos, rneg = 0;
	if (bg)
	{
		sprintf(buf, "%s/negs.txt", dir);
		_ccv_read_background_data(buf, negdata, &rneg, cascade->size);
	}
	for (; i < params.layer; i++)
	{
		if (!bg)
		{
			rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum);
			/* save state of background data */
			sprintf(buf, "%s/negs.txt", dir);
			_ccv_write_background_data(buf, negdata, rneg, cascade->size);
			bg = 1;
		}
		double totalw;
		/* save state of cascade : level, weight etc. */
		sprintf(buf, "%s/stat.txt", dir);
		_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
		ccv_bbf_stage_classifier_t classifier;
		if (k > 0)
		{
			/* resume state of classifier */
			sprintf(buf, "%s/stage-%d.txt", dir, i);
			_ccv_read_bbf_stage_classifier(buf, &classifier);
		} else {
			/* initialize classifier */
			for (j = 0; j < posnum; j++)
				pw[j] = params.balance_k;
			for (j = 0; j < rneg; j++)
				nw[j] = inv_balance_k;
			classifier.count = k;
			classifier.threshold = 0;
			classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t));
			classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float));
		}
		_ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum);
		rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size);
		printf("%d positive data and %d negative data in training\n", rpos, rneg);
		/* reweight to 1.00 */
		totalw = 0;
		for (j = 0; j < rpos; j++)
			totalw += pw[j];
		for (j = 0; j < rneg; j++)
			totalw += nw[j];
		for (j = 0; j < rpos; j++)
			pw[j] = pw[j] / totalw;
		for (j = 0; j < rneg; j++)
			nw[j] = nw[j] / totalw;
		for (; ; k++)
		{
			/* get overall true-positive, false-positive rate and threshold */
			double tp = 0, fp = 0, etp = 0, efp = 0;
			_ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval);
			_ccv_sort_32f(peval, rpos, 0);
			classifier.threshold = peval[(int)((1. - params.pos_crit) * rpos)] - 1e-6;
			for (j = 0; j < rpos; j++)
			{
				if (peval[j] >= 0)
					++tp;
				if (peval[j] >= classifier.threshold)
					++etp;
			}
			tp /= rpos;
			etp /= rpos;
			for (j = 0; j < rneg; j++)
			{
				if (neval[j] >= 0)
					++fp;
				if (neval[j] >= classifier.threshold)
					++efp;
			}
			fp /= rneg;
			efp /= rneg;
			printf("stage classifier real TP rate : %f, FP rate : %f\n", tp, fp);
			printf("stage classifier TP rate : %f, FP rate : %f at threshold : %f\n", etp, efp, classifier.threshold);
			if (k > 0)
			{
				/* save classifier state */
				sprintf(buf, "%s/stage-%d.txt", dir, i);
				_ccv_write_bbf_stage_classifier(buf, &classifier);
				sprintf(buf, "%s/stat.txt", dir);
				_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
			}
			if (etp > params.pos_crit && efp < params.neg_crit)
				break;
			/* TODO: more post-process is needed in here */
			/* select the best feature in current distribution through genetic algorithm optimization */
			ccv_bbf_feature_t best;
			if (params.optimizer == CCV_BBF_GENETIC_OPT)
			{
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
			} else if (params.optimizer == CCV_BBF_FLOAT_OPT) {
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw);
			} else {
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw);
			}
			double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw);
			double rw = (1 - err) / err;
			totalw = 0;
			/* reweight */
			for (j = 0; j < rpos; j++)
			{
				unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 };
				if (!_ccv_run_bbf_feature(&best, steps, u8))
					pw[j] *= rw;
				pw[j] *= params.balance_k;
				totalw += pw[j];
			}
			for (j = 0; j < rneg; j++)
			{
				unsigned char* u8[] = { negdata[j], negdata[j] + isizs0, negdata[j] + isizs01 };
				if (_ccv_run_bbf_feature(&best, steps, u8))
					nw[j] *= rw;
				nw[j] *= inv_balance_k;
				totalw += nw[j];
			}
			for (j = 0; j < rpos; j++)
				pw[j] = pw[j] / totalw;
			for (j = 0; j < rneg; j++)
				nw[j] = nw[j] / totalw;
			double c = log(rw);
			printf("coefficient of feature %d: %f\n", k + 1, c);
			classifier.count = k + 1;
			/* resizing classifier */
			if (k >= cacheK)
			{
				ccv_bbf_feature_t* feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * 2 * sizeof(ccv_bbf_feature_t));
				memcpy(feature, classifier.feature, cacheK * sizeof(ccv_bbf_feature_t));
				ccfree(classifier.feature);
				float* alpha = (float*)ccmalloc(cacheK * 4 * sizeof(float));
				memcpy(alpha, classifier.alpha, cacheK * 2 * sizeof(float));
				ccfree(classifier.alpha);
				classifier.feature = feature;
				classifier.alpha = alpha;
				cacheK *= 2;
			}
			/* setup new feature */
			classifier.feature[k] = best;
			classifier.alpha[k * 2] = -c;
			classifier.alpha[k * 2 + 1] = c;
		}
		cascade->count = i + 1;
		ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
		memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t));
		ccfree(cascade->stage_classifier);
		stage_classifier[i] = classifier;
		cascade->stage_classifier = stage_classifier;
		k = 0;
		bg = 0;
		for (j = 0; j < rpos; j++)
			ccfree(posdata[j]);
		for (j = 0; j < rneg; j++)
			ccfree(negdata[j]);
	}
	ccfree(neval);
	ccfree(peval);
	ccfree(nw);
	ccfree(pw);
	ccfree(negdata);
	ccfree(posdata);
	ccfree(cascade);
}
#else
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	fprintf(stderr, " ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n");
}
#endif

static int _ccv_is_equal(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	int distance = (int)(r1->rect.width * 0.25 + 0.5);
	return r2->rect.x <= r1->rect.x + distance &&
		   r2->rect.x >= r1->rect.x - distance &&
		   r2->rect.y <= r1->rect.y + distance &&
		   r2->rect.y >= r1->rect.y - distance &&
		   r2->rect.width <= (int)(r1->rect.width * 1.5 + 0.5) &&
		   (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}

static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	int distance = (int)(r1->rect.width * 0.25 + 0.5);
	return r2->classification.id == r1->classification.id &&
		   r2->rect.x <= r1->rect.x + distance &&
		   r2->rect.x >= r1->rect.x - distance &&
		   r2->rect.y <= r1->rect.y + distance &&
		   r2->rect.y >= r1->rect.y - distance &&
		   r2->rect.width <= (int)(r1->rect.width * 1.5 + 0.5) &&
		   (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}

ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params)
{
	int hr = a->rows / params.size.height;
	int wr = a->cols / params.size.width;
	double scale = pow(2., 1. / (params.interval + 1.));
	int next = params.interval + 1;
	int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale));
	ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
	if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
		ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA);
	else
		pyr[0] = a;
	int i, j, k, t, x, y, q;
	for (i = 1; i < ccv_min(params.interval + 1, scale_upto + next * 2); i++)
		ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA);
	for (i = next; i < scale_upto + next * 2; i++)
		ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0);
	if (params.accurate)
		for (i = next * 2; i < scale_upto + next * 2; i++)
		{
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 1], 0, 1, 0);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1);
			ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1);
		}
	ccv_array_t* idx_seq;
	ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
	/* detect in multi scale */
	for (t = 0; t < count; t++)
	{
		ccv_bbf_classifier_cascade_t* cascade = _cascade[t];
		float scale_x = (float) params.size.width / (float) cascade->size.width;
		float scale_y = (float) params.size.height / (float) cascade->size.height;
		ccv_array_clear(seq);
		for (i = 0; i < scale_upto; i++)
		{
			int dx[] = {0, 1, 0, 1};
			int dy[] = {0, 0, 1, 1};
			int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2);
			int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step };
			int i_cols = pyr[i * 4 + next * 8]->cols - (cascade->size.width >> 2);
			int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4,
							   pyr[i * 4 + next * 4]->step * 2 - i_cols * 2,
pyr[i * 4 + next * 8]->step - i_cols }; for (q = 0; q < (params.accurate ? 4 : 1); q++) { unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2, pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8 + q]->data.u8 }; for (y = 0; y < i_rows; y++) { for (x = 0; x < i_cols; x++) { float sum; int flag = 1; ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier; for (j = 0; j < cascade->count; ++j, ++classifier) { sum = 0; float* alpha = classifier->alpha; ccv_bbf_feature_t* feature = classifier->feature; for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature) sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)]; if (sum < classifier->threshold) { flag = 0; break; } } if (flag) { ccv_comp_t comp; comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5)); comp.neighbors = 1; comp.classification.id = t; comp.classification.confidence = sum; ccv_array_push(seq, &comp); } u8[0] += 4; u8[1] += 2; u8[2] += 1; } u8[0] += paddings[0]; u8[1] += paddings[1]; u8[2] += paddings[2]; } } scale_x *= scale; scale_y *= scale; } /* the following code from OpenCV's haar feature implementation */ if(params.min_neighbors == 0) { for (i = 0; i < seq->rnum; i++) { ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i); ccv_array_push(result_seq, comp); } } else { idx_seq = 0; ccv_array_clear(seq2); // group retrieved rectangles in order to filter out noise int ncomp = ccv_array_group(seq, &idx_seq, _ccv_is_equal_same_class, 0); ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t)); memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t)); // count number of neighbors for(i = 0; i < seq->rnum; i++) { ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq, i); int idx = *(int*)ccv_array_get(idx_seq, i); if (comps[idx].neighbors == 0) comps[idx].classification.confidence = r1.classification.confidence; ++comps[idx].neighbors; comps[idx].rect.x += r1.rect.x; comps[idx].rect.y += r1.rect.y; comps[idx].rect.width += r1.rect.width; comps[idx].rect.height += r1.rect.height; comps[idx].classification.id = r1.classification.id; comps[idx].classification.confidence = ccv_max(comps[idx].classification.confidence, r1.classification.confidence); } // calculate average bounding box for(i = 0; i < ncomp; i++) { int n = comps[i].neighbors; if(n >= params.min_neighbors) { ccv_comp_t comp; comp.rect.x = (comps[i].rect.x * 2 + n) / (2 * n); comp.rect.y = (comps[i].rect.y * 2 + n) / (2 * n); comp.rect.width = (comps[i].rect.width * 2 + n) / (2 * n); comp.rect.height = (comps[i].rect.height * 2 + n) / (2 * n); comp.neighbors = comps[i].neighbors; comp.classification.id = comps[i].classification.id; comp.classification.confidence = comps[i].classification.confidence; ccv_array_push(seq2, &comp); } } // filter out small face rectangles inside large face rectangles for(i = 0; i < seq2->rnum; i++) { ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq2, i); int flag = 1; for(j = 0; j < seq2->rnum; j++) { ccv_comp_t r2 = *(ccv_comp_t*)ccv_array_get(seq2, j); int distance = (int)(r2.rect.width * 0.25 + 0.5); if(i != j && r1.classification.id == r2.classification.id && r1.rect.x >= r2.rect.x - distance && r1.rect.y >= r2.rect.y - distance && r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance && r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance && (r2.neighbors > ccv_max(3, 
r1.neighbors) || r1.neighbors < 3)) { flag = 0; break; } } if(flag) ccv_array_push(result_seq, &r1); } ccv_array_free(idx_seq); ccfree(comps); } } ccv_array_free(seq); ccv_array_free(seq2); ccv_array_t* result_seq2; /* the following code from OpenCV's haar feature implementation */ if (params.flags & CCV_BBF_NO_NESTED) { result_seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0); idx_seq = 0; // group retrieved rectangles in order to filter out noise int ncomp = ccv_array_group(result_seq, &idx_seq, _ccv_is_equal, 0); ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t)); memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t)); // count number of neighbors for(i = 0; i < result_seq->rnum; i++) { ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(result_seq, i); int idx = *(int*)ccv_array_get(idx_seq, i); if (comps[idx].neighbors == 0 || comps[idx].classification.confidence < r1.classification.confidence) { comps[idx].classification.confidence = r1.classification.confidence; comps[idx].neighbors = 1; comps[idx].rect = r1.rect; comps[idx].classification.id = r1.classification.id; } } // calculate average bounding box for(i = 0; i < ncomp; i++) if(comps[i].neighbors) ccv_array_push(result_seq2, &comps[i]); ccv_array_free(result_seq); ccfree(comps); } else { result_seq2 = result_seq; } for (i = 1; i < scale_upto + next * 2; i++) ccv_matrix_free(pyr[i * 4]); if (params.accurate) for (i = next * 2; i < scale_upto + next * 2; i++) { ccv_matrix_free(pyr[i * 4 + 1]); ccv_matrix_free(pyr[i * 4 + 2]); ccv_matrix_free(pyr[i * 4 + 3]); } if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width) ccv_matrix_free(pyr[0]); return result_seq2; } ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade(const char* directory) { char buf[1024]; sprintf(buf, "%s/cascade.txt", directory); int s, i; FILE* r = fopen(buf, "r"); if (r == 0) return 0; ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t)); s = fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height); assert(s > 0); cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t)); for (i = 0; i < cascade->count; i++) { sprintf(buf, "%s/stage-%d.txt", directory, i); if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0) { cascade->count = i; break; } } fclose(r); return cascade; } ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s) { int i; ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t)); memcpy(&cascade->count, s, sizeof(cascade->count)); s += sizeof(cascade->count); memcpy(&cascade->size.width, s, sizeof(cascade->size.width)); s += sizeof(cascade->size.width); memcpy(&cascade->size.height, s, sizeof(cascade->size.height)); s += sizeof(cascade->size.height); ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t)); for (i = 0; i < cascade->count; i++, classifier++) { memcpy(&classifier->count, s, sizeof(classifier->count)); s += sizeof(classifier->count); memcpy(&classifier->threshold, s, sizeof(classifier->threshold)); s += sizeof(classifier->threshold); classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t)); classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float)); 
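		/* each stage's feature array and its 2 * count alpha values are stored verbatim in the
		 * serialized buffer (see ccv_bbf_classifier_cascade_write_binary below), so they can be
		 * copied straight into the freshly allocated arrays */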
memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t); memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float); } return cascade; } int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen) { int i; int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height); ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier; for (i = 0; i < cascade->count; i++, classifier++) len += sizeof(classifier->count) + sizeof(classifier->threshold) + classifier->count * sizeof(ccv_bbf_feature_t) + classifier->count * 2 * sizeof(float); if (slen >= len) { memcpy(s, &cascade->count, sizeof(cascade->count)); s += sizeof(cascade->count); memcpy(s, &cascade->size.width, sizeof(cascade->size.width)); s += sizeof(cascade->size.width); memcpy(s, &cascade->size.height, sizeof(cascade->size.height)); s += sizeof(cascade->size.height); classifier = cascade->stage_classifier; for (i = 0; i < cascade->count; i++, classifier++) { memcpy(s, &classifier->count, sizeof(classifier->count)); s += sizeof(classifier->count); memcpy(s, &classifier->threshold, sizeof(classifier->threshold)); s += sizeof(classifier->threshold); memcpy(s, classifier->feature, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t); memcpy(s, classifier->alpha, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float); } } return len; } void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade) { int i; for (i = 0; i < cascade->count; ++i) { ccfree(cascade->stage_classifier[i].feature); ccfree(cascade->stage_classifier[i].alpha); } ccfree(cascade->stage_classifier); ccfree(cascade); }
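
/* Illustrative usage sketch (not part of the library): how a cascade directory written during
 * training might be loaded with ccv_bbf_read_classifier_cascade() and applied with
 * ccv_bbf_detect_objects(). The image path and cascade directory below are hypothetical
 * placeholders, and the sketch assumes the usual ccv helpers (ccv_read, ccv_bbf_default_params)
 * are available to the caller.
 *
 *	ccv_dense_matrix_t* image = 0;
 *	ccv_read("photo.png", &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
 *	ccv_bbf_classifier_cascade_t* cascade = ccv_bbf_read_classifier_cascade("face-cascade-dir");
 *	if (image && cascade)
 *	{
 *		ccv_array_t* faces = ccv_bbf_detect_objects(image, &cascade, 1, ccv_bbf_default_params);
 *		int i;
 *		for (i = 0; i < faces->rnum; i++)
 *		{
 *			ccv_comp_t* face = (ccv_comp_t*)ccv_array_get(faces, i);
 *			printf("%d %d %d %d %f\n", face->rect.x, face->rect.y,
 *				   face->rect.width, face->rect.height, face->classification.confidence);
 *		}
 *		ccv_array_free(faces);
 *	}
 *	if (cascade)
 *		ccv_bbf_classifier_cascade_free(cascade);
 *	if (image)
 *		ccv_matrix_free(image);
 */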